diff --git a/appscale/tools/appscale_tools.py b/appscale/tools/appscale_tools.py index c79c4f09..1928e812 100644 --- a/appscale/tools/appscale_tools.py +++ b/appscale/tools/appscale_tools.py @@ -33,6 +33,7 @@ from appscale.tools.node_layout import NodeLayout from appscale.tools.remote_helper import RemoteHelper +from appscale.agents.base_agent import BaseAgent from appscale.agents.factory import InfrastructureAgentFactory @@ -773,7 +774,10 @@ def run_instances(cls, options): node_layout = NodeLayout(options) if options.infrastructure: - if (not options.test and not options.force and + disk_auto = InfrastructureAgentFactory.agent_has_flag( + options.infrastructure, BaseAgent.FLAG_DISK_AUTO) + + if (not options.test and not options.force and not disk_auto and not (options.disks or node_layout.are_disks_used())): LocalState.ensure_user_wants_to_run_without_disks() diff --git a/appscale/tools/local_state.py b/appscale/tools/local_state.py index 26a00578..01b596b8 100644 --- a/appscale/tools/local_state.py +++ b/appscale/tools/local_state.py @@ -1,5 +1,6 @@ #!/usr/bin/env python +from __future__ import absolute_import # First-party Python imports import fnmatch @@ -29,6 +30,8 @@ from .custom_exceptions import BadConfigurationException from .custom_exceptions import ShellException +from appscale.agents.base_agent import BaseAgent +from appscale.agents.factory import InfrastructureAgentFactory # The version of the AppScale Tools we're running on. 
APPSCALE_VERSION = "3.8.1" @@ -245,6 +248,8 @@ def generate_deployment_params(cls, options, node_layout, additional_creds): iaas_creds['EC2_ACCESS_KEY'] = options.EC2_ACCESS_KEY iaas_creds['EC2_SECRET_KEY'] = options.EC2_SECRET_KEY iaas_creds['EC2_URL'] = options.EC2_URL + elif options.infrastructure in ['ec2t']: + iaas_creds['aws_launch_template_id'] = options.aws_launch_template_id elif options.infrastructure == 'azure': iaas_creds['azure_subscription_id'] = options.azure_subscription_id iaas_creds['azure_app_id'] = options.azure_app_id @@ -407,11 +412,12 @@ def update_local_metadata(cls, options, db_master, head_node): # write our yaml metadata file appscalefile_contents = { 'infrastructure' : infrastructure, - 'group' : options.group, } - if infrastructure != 'xen': - appscalefile_contents['zone'] = options.zone + if infrastructure != 'ec2t': + appscalefile_contents['group'] = options.group + if infrastructure != 'xen': + appscalefile_contents['zone'] = options.zone if infrastructure == 'gce': appscalefile_contents['project'] = options.project @@ -419,6 +425,8 @@ def update_local_metadata(cls, options, db_master, head_node): appscalefile_contents['EC2_ACCESS_KEY'] = options.EC2_ACCESS_KEY appscalefile_contents['EC2_SECRET_KEY'] = options.EC2_SECRET_KEY appscalefile_contents['EC2_URL'] = options.EC2_URL + elif infrastructure == 'ec2t': + appscalefile_contents['aws_launch_template_id'] = options.aws_launch_template_id elif infrastructure == 'azure': appscalefile_contents['azure_subscription_id'] = options.azure_subscription_id appscalefile_contents['azure_app_id'] = options.azure_app_id @@ -1244,6 +1252,13 @@ def ensure_appscalefile_is_up_to_date(cls): if 'keyname' in yaml_contents and 'group' in yaml_contents: return True + # Skip appending a keyname/group: the agent manages keys itself (FLAG_KEY_AUTO) + if ('infrastructure' in yaml_contents and + InfrastructureAgentFactory.agent_has_flag( + yaml_contents['infrastructure'], + BaseAgent.FLAG_KEY_AUTO)): + return True + file_contents += "\n# 
Automatically added by the AppScale Tools: " random_suffix = str(uuid.uuid4()).replace('-', '') diff --git a/appscale/tools/node_layout.py b/appscale/tools/node_layout.py index d74e7dde..e99f5411 100644 --- a/appscale/tools/node_layout.py +++ b/appscale/tools/node_layout.py @@ -8,6 +8,7 @@ # AppScale-specific imports +from appscale.agents.base_agent import BaseAgent from appscale.agents.factory import InfrastructureAgentFactory from .appscale_logger import AppScaleLogger from .custom_exceptions import BadConfigurationException @@ -272,7 +273,8 @@ def validate_node_layout(self): instance_type = node_set.get('instance_type', self.default_instance_type) if self.infrastructure: - if not instance_type: + if not instance_type and not InfrastructureAgentFactory.agent_has_flag( + self.infrastructure, BaseAgent.FLAG_INSTANCE_TYPE_AUTO): self.invalid("Must set a default instance type or specify instance " "type per role.") diff --git a/appscale/tools/parse_args.py b/appscale/tools/parse_args.py index 4ca26900..5b6b83a6 100644 --- a/appscale/tools/parse_args.py +++ b/appscale/tools/parse_args.py @@ -249,6 +249,8 @@ def add_allowed_flags(self, function): help="the id for the vpc in the aws region to spawn instances in.") self.parser.add_argument('--aws_subnet_id', help="the id for the subnet in the aws region to spawn instances in.") + self.parser.add_argument('--aws_launch_template_id', + help="the id for the launch template in the aws region to spawn instances in.") # Google Compute Engine-specific flags gce_group = self.parser.add_mutually_exclusive_group() @@ -669,6 +671,13 @@ def validate_infrastructure_flags(self): return + # Verify ec2t settings + if self.args.infrastructure == 'ec2t': + if not self.args.aws_launch_template_id: + raise BadConfigurationException(("Need a launch template id" + " (aws_launch_template_id) when using the ec2t agent")) + return + # Make sure the user gave us an ami/emi if running in a cloud. 
if not self.args.machine: raise BadConfigurationException("Need a machine image (ami) " + diff --git a/appscale/tools/remote_helper.py b/appscale/tools/remote_helper.py index b8b87b50..c2066854 100644 --- a/appscale/tools/remote_helper.py +++ b/appscale/tools/remote_helper.py @@ -24,7 +24,8 @@ from .custom_exceptions import BadConfigurationException from .custom_exceptions import ShellException from .custom_exceptions import TimeoutException -from appscale.agents.base_agent import AgentRuntimeException +from appscale.agents.base_agent import ( + AgentRuntimeException, BaseAgent) from appscale.agents.gce_agent import CredentialTypes from appscale.agents.gce_agent import GCEAgent from appscale.agents.factory import InfrastructureAgentFactory @@ -250,6 +251,11 @@ def enable_root_ssh(cls, options, public_ip): AppScaleLogger.log("Enabling root ssh on {0}".format(public_ip)) cls.sleep_until_port_is_open(public_ip, cls.SSH_PORT) + if InfrastructureAgentFactory.agent_has_flag( + options.infrastructure, BaseAgent.FLAG_SSH_AUTO): + cls.sleep_until_ssh_is_configured(public_ip, options.keyname) + return + cls.enable_root_login(public_ip, options.keyname, options.infrastructure) cls.copy_ssh_keys_to_node(public_ip, options.keyname) @@ -376,6 +382,31 @@ def sleep_until_port_is_open(cls, host, port, is_verbose=None): raise TimeoutException("Port {}:{} did not open in time. " "Aborting...".format(host, port)) + @classmethod + def sleep_until_ssh_is_configured(cls, host, keyname, is_verbose=None): + """Queries the given host to see if SSH is open, and if not, + waits until it is. + + Args: + host: A str representing the machine that we should log into. + keyname: A str representing the name of the SSH keypair to log in with. + is_verbose: A bool indicating verbose logging + Raises: + TimeoutException if SSH does not open in a certain amount of time. 
+ """ + sleep_time = cls.MAX_WAIT_TIME + while sleep_time > 0: + try: + cls.ssh(host, keyname, 'ls /', is_verbose) + return + except ShellException: + pass # Wait and retry + AppScaleLogger.verbose("Waiting {1} second(s) for {0} to open" + .format(host, cls.WAIT_TIME), is_verbose) + time.sleep(cls.WAIT_TIME) + sleep_time -= cls.WAIT_TIME + raise TimeoutException("Host SSH {} did not open in time. " + "Aborting...".format(host)) @classmethod def is_port_open(cls, host, port, is_verbose=None): diff --git a/test/test_appscale_logger.py b/test/test_appscale_logger.py index 755188cb..5c7eb0fe 100644 --- a/test/test_appscale_logger.py +++ b/test/test_appscale_logger.py @@ -82,6 +82,7 @@ def setUp(self): "verbose" : False, "version" : False, "zone" : "my-zone-1b", + "aws_launch_template_id" : None, "aws_subnet_id" : None, "aws_vpc_id" : None, "azure_subscription_id" : None,