From 0e8827d2c076dff691cd65641bcbcc380e0d9739 Mon Sep 17 00:00:00 2001 From: akhon Date: Wed, 13 Apr 2022 18:59:40 -0700 Subject: [PATCH] moved aws-rollout script to logger instead of printing --- .../aws-autoscaling-rollout.py | 307 +++++++++--------- 1 file changed, 150 insertions(+), 157 deletions(-) diff --git a/aws-autoscaling-rollout/aws-autoscaling-rollout.py b/aws-autoscaling-rollout/aws-autoscaling-rollout.py index ec827f7..1e8b48f 100755 --- a/aws-autoscaling-rollout/aws-autoscaling-rollout.py +++ b/aws-autoscaling-rollout/aws-autoscaling-rollout.py @@ -31,8 +31,8 @@ ###################### import boto3 import time -# For debugging -from pprint import pprint +import os +import logging # For CLI Parsing of args from optparse import OptionParser # This is for the pre/post external health check feature @@ -49,6 +49,11 @@ elbv2 = boto3.client('elbv2', region_name='eu-west-1') +LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO') +logging.basicConfig(level=LOG_LEVEL, format='%(levelname)s: %(asctime)s: %(message)s') +logger = logging.getLogger(__name__) +logger.setLevel(logging.DEBUG) + ###################### # CLI Argument handling ###################### @@ -96,13 +101,13 @@ # Startup simple checks... if options.autoscaler == "": - print("ERROR: You MUST specify the autoscaler with -a") + logger.info("ERROR: You MUST specify the autoscaler with -a") parser.print_usage() exit(1) if options.force: - print("ALERT: We are force-deploying this autoscaler, which may cause downtime under some circumstances") + logger.info("ALERT: We are force-deploying this autoscaler, which may cause downtime under some circumstances") if options.skip: - print("ALERT: We are skipping ELB health checks of new instances as they come up, this will probably cause downtime") + logger.info("ALERT: We are skipping ELB health checks of new instances as they come up, this will probably cause downtime") ###################### @@ -164,7 +169,7 @@ def update_auto_scaling_group_max_size( autoscaling_group_name, max_size ): if response['ResponseMetadata']['HTTPStatusCode'] == 200: return True else: - print("ERROR: Unable to set max autoscaling group size on '" + autoscaling_group_name + "'") + logger.info("ERROR: Unable to set max autoscaling group size on '" + autoscaling_group_name + "'") return False # Get target group @@ -281,7 +286,7 @@ def suspend_processes( autoscaling_group_name, processes_to_suspend ): if response['ResponseMetadata']['HTTPStatusCode'] == 200: return True else: - print("ERROR: Unable to suspend_processes on '" + autoscaling_group_name + "'") + logger.info("ERROR: Unable to suspend_processes on '" + autoscaling_group_name + "'") return False @@ -294,7 +299,7 @@ def resume_processes( autoscaling_group_name, processes_to_resume ): if response['ResponseMetadata']['HTTPStatusCode'] == 200: return True else: - print("ERROR: Unable to resume_processes on '" + autoscaling_group_name + "'") + logger.info("ERROR: Unable to resume_processes on '" + autoscaling_group_name + "'") return False @@ -305,7 +310,7 @@ def resume_all_processes( autoscaling_group_name ): if response['ResponseMetadata']['HTTPStatusCode'] == 200: return True else: - print("ERROR: Unable to resume_all_processes on '" + autoscaling_group_name + "'") + logger.info("ERROR: Unable to resume_all_processes on '" + autoscaling_group_name + "'") return False @@ -321,14 +326,14 @@ def check_if_autoscaler_is_scaling( autoscaling_group_name ): # Quick error checking if len(autoscaler['AutoScalingGroups']) != 1: - print("ERROR: Unable to get describe 
autoscaling group: " + autoscaling_group_name) + logger.info("ERROR: Unable to get describe autoscaling group: " + autoscaling_group_name) exit(1) autoscaler = autoscaler['AutoScalingGroups'][0] # Check if our healthy instance count matches our desired capacity healthy_instance_count = get_number_of_autoscaler_healthy_instances( autoscaler ) if healthy_instance_count != autoscaler['DesiredCapacity']: - print("INFO: Our autoscaler must be scaling, desired " + str(autoscaler['DesiredCapacity']) + ", healthy instances " + str(healthy_instance_count)) + logger.info("INFO: Our autoscaler must be scaling, desired " + str(autoscaler['DesiredCapacity']) + ", healthy instances " + str(healthy_instance_count)) return True return False @@ -346,7 +351,7 @@ def deregister_instance_from_load_balancer( instance_id, loadbalancer_name ): if response['ResponseMetadata']['HTTPStatusCode'] == 200: return True else: - print("ERROR: Unable to deregister instance '" + instance_id + "' from load balancer '" + loadbalancer_name + "'") + logger.info("ERROR: Unable to deregister instance '" + instance_id + "' from load balancer '" + loadbalancer_name + "'") return False @@ -362,7 +367,7 @@ def deregister_instance_from_target_group( instance_id, target_group_arn ): if response['ResponseMetadata']['HTTPStatusCode'] == 200: return True else: - print("ERROR: Unable to deregister instance '" + instance_id + "' from load balancer '" + loadbalancer_name + "'") + logger.info("ERROR: Unable to deregister instance '" + instance_id + "' from load balancer '" + loadbalancer_name + "'") return False @@ -377,13 +382,13 @@ def wait_for_autoscaler_to_have_healthy_desired_instances( autoscaling_group_nam while True: healthy_instance_count = int(get_number_of_autoscaler_healthy_instances( autoscaler_description['AutoScalingGroupName'] )) if desired_capacity != healthy_instance_count: - print("WARNING: We have " + str(healthy_instance_count) + " healthy instances on the autoscaler but we want " + str(desired_capacity)) + logger.info("WARNING: We have " + str(healthy_instance_count) + " healthy instances on the autoscaler but we want " + str(desired_capacity)) elif check_if_autoscaler_is_scaling( autoscaler_description['AutoScalingGroupName'] ): - print("WARNING: We are currently performing some autoscaling, we should wait...") + logger.info("WARNING: We are currently performing some autoscaling, we should wait...") else: - print("SUCCESS: We currently have desired capacity of " + str(desired_capacity) + " on this autoscaler") + logger.info("SUCCESS: We currently have desired capacity of " + str(desired_capacity) + " on this autoscaler") break - print("Waiting for 5 seconds...") + logger.info("Waiting for 5 seconds...") time.sleep(5) @@ -407,7 +412,7 @@ def get_autoscaler_healthy_instances( autoscaling_group_name_or_definition ): def terminate_instance_in_auto_scaling_group( instance_id, autoscaling_group_name, decrement_capacity=False ): - print("DEBUG: Terminating instance '" + instance_id + "' from the autoscaling group '" + autoscaling_group_name + "'...") + logger.info("Terminating instance '" + instance_id + "' from the autoscaling group '" + autoscaling_group_name + "'...") if decrement_capacity is True: response = autoscaling.terminate_instance_in_auto_scaling_group( @@ -421,15 +426,15 @@ def terminate_instance_in_auto_scaling_group( instance_id, autoscaling_group_nam ) if response['ResponseMetadata']['HTTPStatusCode'] == 200: - print("DEBUG: Executed okay") + logger.info("Executed okay") return True else: - print("ERROR: 
Unable to detach autoscaler '" + autoscaling_group_name + "' from the load balancer '" + loadbalancer_name) + logger.info("ERROR: Unable to detach autoscaler '" + autoscaling_group_name + "' from the load balancer '" + loadbalancer_name) exit(1) def set_desired_capacity( autoscaling_group_name, desired_capacity ): - print("DEBUG: Setting desired capacity of '" + autoscaling_group_name + "' to '" + str(desired_capacity) + "'...") + logger.info("Setting desired capacity of '" + autoscaling_group_name + "' to '" + str(desired_capacity) + "'...") response = autoscaling.set_desired_capacity( AutoScalingGroupName=autoscaling_group_name, DesiredCapacity=desired_capacity, @@ -438,10 +443,10 @@ def set_desired_capacity( autoscaling_group_name, desired_capacity ): # Check if this executed okay... if response['ResponseMetadata']['HTTPStatusCode'] == 200: - print("DEBUG: Executed okay") + logger.info("Executed okay") return True else: - print("ERROR: Unable to set_desired_capacity on '" + autoscaling_group_name + "'") + logger.info("ERROR: Unable to set_desired_capacity on '" + autoscaling_group_name + "'") exit(1) @@ -471,51 +476,51 @@ def get_instance_ids_of_load_balancer( loadbalancer_name_or_definition ): def wait_for_complete_targetgroup_autoscaler_attachment( target_group_arn, autoscaling_group_name ): - print("DEBUG: Waiting for attachment of autoscaler " + autoscaling_group_name + " to target_group_arn: " + target_group_arn) + logger.info("Waiting for attachment of autoscaler " + autoscaling_group_name + " to target_group_arn: " + target_group_arn) while True: # Get instances from target group - print("DEBUG: Getting target group instances") + logger.info("Getting target group instances") target_group = elbv2.describe_target_health( TargetGroupArn=target_group_arn ) # Get healthy instance ids from target group - print("DEBUG: Getting instance ids from load balancer") + logger.info("Getting instance ids from load balancer") instance_health_flat = [] for instance in target_group['TargetHealthDescriptions']: if (instance['TargetHealth']['State'] == 'healthy'): instance_health_flat.append(instance['Target']['Id']) # Get our healthy instances from our autoscaler - print("DEBUG: Getting healthy instances on our autoscaler") + logger.info("Getting healthy instances on our autoscaler") autoscaler = get_autoscaling_group( autoscaling_group_name ) as_instances = get_autoscaler_healthy_instances( autoscaler ) successes = 0 for instance in as_instances: if instance['InstanceId'] in instance_health_flat: - print("DEBUG: SUCCESS - Instance " + instance['InstanceId'] + " is healthy in our target group") + logger.info("SUCCESS - Instance " + instance['InstanceId'] + " is healthy in our target group") successes = successes + 1 else: - print("DEBUG: FAIL - Instance " + instance['InstanceId'] + " is unhealthy or not present in our target group") + logger.info("FAIL - Instance " + instance['InstanceId'] + " is unhealthy or not present in our target group") if successes >= len(as_instances): if int(autoscaler['DesiredCapacity']) == successes: - print("DEBUG: We have " + str(successes) + " healthy instances on the target group and on the ASG") + logger.info("We have " + str(successes) + " healthy instances on the target group and on the ASG") break else: - print("DEBUG: FAIL - We have " + str(successes) + " healthy instances on the target group but we have desired instances set to " + str(autoscaler['DesiredCapacity']) + " on the ASG") + logger.info("FAIL - We have " + str(successes) + " healthy instances on the 
target group but we have desired instances set to " + str(autoscaler['DesiredCapacity']) + " on the ASG") else: - print("WAIT: Found " + str(successes) + " healthy instances on the target group from the ASG " + str(autoscaler['DesiredCapacity']) + " to continue. Waiting 10 seconds...") + logger.info("Found " + str(successes) + " healthy instances on the target group from the ASG " + str(autoscaler['DesiredCapacity']) + " to continue. Waiting 10 seconds...") time.sleep( 10 ) def wait_for_instances_to_detach_from_loadbalancer( instance_ids, loadbalancer_name ): - print("DEBUG: Waiting for detachment of instance_ids ") - print(instance_ids) - print(" from load balancer:" + loadbalancer_name) + logger.info("Waiting for detachment of instance_ids ") + logger.info(instance_ids) + logger.info(" from load balancer:" + loadbalancer_name) while True: loadbalancer = get_load_balancer(loadbalancer_name) @@ -523,95 +528,95 @@ def wait_for_instances_to_detach_from_loadbalancer( instance_ids, loadbalancer_n failures = 0 for instance in instance_ids: - print(" DEBUG: Checking if " + instance + " is attached to load balancer...") + logger.info(" Checking if " + instance + " is attached to load balancer...") if instance in lb_instances: - print(" ERROR: Currently attached to the load balancer...") + logger.info(" ERROR: Currently attached to the load balancer...") failures = failures + 1 else: - print(" SUCCESS: Instance is not attached to the load balancer") + logger.info(" SUCCESS: Instance is not attached to the load balancer") if failures == 0: - print("SUCCESS: Done waiting for detachment of instance ids") + logger.info("SUCCESS: Done waiting for detachment of instance ids") break - print("DEBUG: Waiting for 10 seconds and trying again...") + logger.info("Waiting for 10 seconds and trying again...") time.sleep( 10 ) - print("DEBUG: DONE waiting for detachment of instances from " + loadbalancer_name) + logger.info("DONE waiting for detachment of instances from " + loadbalancer_name) def wait_for_instances_to_detach_from_target_group( instance_ids, target_group_arn ): - print("DEBUG: Waiting for detachment of instance_ids ") - print(instance_ids) - print(" from target group:" + target_group_arn) + logger.info("Waiting for detachment of instance_ids ") + logger.info(instance_ids) + logger.info(" from target group:" + target_group_arn) while True: - print("DEBUG: Getting target group instances") + logger.info("Getting target group instances") target_group = elbv2.describe_target_health( TargetGroupArn=target_group_arn ) # Get healthy instance ids from target group - print("DEBUG: Getting instance ids from load balancer") + logger.info("Getting instance ids from load balancer") instance_health_flat = [] for instance in target_group['TargetHealthDescriptions']: instance_health_flat.append(instance['Target']['Id']) failures = 0 for instance in instance_ids: - print(" DEBUG: Checking if " + instance + " is attached to target group...") + logger.info(" Checking if " + instance + " is attached to target group...") if instance in instance_health_flat: - print(" ERROR: Currently attached to the target group...") + logger.info(" ERROR: Currently attached to the target group...") failures = failures + 1 else: - print(" SUCCESS: Instance is not attached to the target group") + logger.info(" SUCCESS: Instance is not attached to the target group") if failures == 0: - print("SUCCESS: Done waiting for detachment of instance ids") + logger.info("SUCCESS: Done waiting for detachment of instance ids") break - print("DEBUG: 
Waiting for 10 seconds and trying again...") + logger.info("Waiting for 10 seconds and trying again...") time.sleep( 10 ) - print("DEBUG: DONE waiting for detachment of instances from " + target_group_arn) + logger.info("DONE waiting for detachment of instances from " + target_group_arn) def wait_for_complete_targetgroup_autoscaler_detachment( target_group_arn, autoscaling_group_name ): - print("DEBUG: Waiting for detachment of autoscaler " + autoscaling_group_name + " from target_group_arn:" + target_group_arn) + logger.info("Waiting for detachment of autoscaler " + autoscaling_group_name + " from target_group_arn:" + target_group_arn) while True: # Get instances from target group - print("DEBUG: Getting target group instances") + logger.info("Getting target group instances") target_group = elbv2.describe_target_health( TargetGroupArn=target_group_arn ) # Get healthy instance ids from target group - print("DEBUG: Getting instance ids from load balancer") + logger.info("Getting instance ids from load balancer") instance_health_flat = [] for instance in target_group['TargetHealthDescriptions']: instance_health_flat.append(instance['Target']['Id']) # Get our healthy instances from our autoscaler - print("DEBUG: Getting healthy instances on our autoscaler") + logger.info("Getting healthy instances on our autoscaler") as_instances = get_autoscaler_healthy_instances( autoscaling_group_name ) failures = 0 for instance in as_instances: if instance['InstanceId'] in instance_health_flat: - print("DEBUG: FAIL - Instance " + instance['InstanceId'] + " from our autoscaler is still in our target group") + logger.info("FAIL - Instance " + instance['InstanceId'] + " from our autoscaler is still in our target group") failures = failures + 1 else: - print("DEBUG: Success - Instance " + instance['InstanceId'] + " from our autoscaler is not in our target group") + logger.info("Success - Instance " + instance['InstanceId'] + " from our autoscaler is not in our target group") if failures == 0: - print("DEBUG: SUCCESS - We have no instances from the autoscaling group on this target group...") + logger.info("SUCCESS - We have no instances from the autoscaling group on this target group...") break else: - print("WAIT: Found " + str(failures) + " instances still on the target group from the ASG. Waiting 10 seconds...") + logger.info("Found " + str(failures) + " instances still on the target group from the ASG. 
Waiting 10 seconds...") time.sleep( 10 ) @@ -635,19 +640,19 @@ def flatten_instance_health_array_from_loadbalancer_only_healthy( input_instance def wait_for_complete_loadbalancer_autoscaler_attachment( loadbalancer_name, autoscaling_group_name ): - print("DEBUG: Waiting for attachment of autoscaler " + autoscaling_group_name + " to load balancer:" + loadbalancer_name) + logger.info("Waiting for attachment of autoscaler " + autoscaling_group_name + " to load balancer:" + loadbalancer_name) while True: # Get instances from load balancer - print("DEBUG: Getting load balancer") + logger.info("Getting load balancer") loadbalancer = get_load_balancer(loadbalancer_name) # Get instance ids from load balancer - print("DEBUG: Getting instance ids from load balancer") + logger.info("Getting instance ids from load balancer") temptwo = get_instance_ids_of_load_balancer(loadbalancer) # Get their healths (on the ELB) - print("DEBUG: Getting instance health on the load balancer") + logger.info("Getting instance health on the load balancer") instance_health = elb.describe_instance_health( LoadBalancerName=loadbalancer_name, Instances=loadbalancer['Instances'] @@ -658,50 +663,38 @@ def wait_for_complete_loadbalancer_autoscaler_attachment( loadbalancer_name, aut instance_health_flat = flatten_instance_health_array_from_loadbalancer_only_healthy(instance_health) # Get our healthy instances from our autoscaler - print("DEBUG: Getting healthy instances on our autoscaler") + logger.info("Getting healthy instances on our autoscaler") autoscaler = get_autoscaling_group( autoscaling_group_name ) as_instances = get_autoscaler_healthy_instances( autoscaler ) successes = 0 for instance in as_instances: if instance['InstanceId'] in instance_health_flat: - print("DEBUG: SUCCESS - Instance " + instance['InstanceId'] + " is healthy in our ELB") + logger.info("SUCCESS - Instance " + instance['InstanceId'] + " is healthy in our ELB") successes = successes + 1 else: - print("DEBUG: FAIL - Instance " + instance['InstanceId'] + " is unhealthy or not present in our ELB") + logger.info("FAIL - Instance " + instance['InstanceId'] + " is unhealthy or not present in our ELB") if successes >= len(as_instances): if int(autoscaler['DesiredCapacity']) == successes: - print("DEBUG: We have " + str(successes) + " healthy instances on the elb and on the ASG") + logger.info("We have " + str(successes) + " healthy instances on the elb and on the ASG") break else: - print("WAIT: Found " + str(successes) + " healthy instances on the elb from the ASG " + str(autoscaler['DesiredCapacity']) + " to continue. Waiting 10 seconds...") + logger.info("Found " + str(successes) + " healthy instances on the elb from the ASG " + str(autoscaler['DesiredCapacity']) + " to continue. Waiting 10 seconds...") else: - print("WAIT: Found " + str(successes) + " healthy instances on the elb from the ASG " + str(autoscaler['DesiredCapacity']) + " to continue. Waiting 10 seconds...") + logger.info("Found " + str(successes) + " healthy instances on the elb from the ASG " + str(autoscaler['DesiredCapacity']) + " to continue. 
Waiting 10 seconds...") time.sleep( 10 ) - - - - - - - - - - - - ###################### # Core application logic ###################### # Verify/get our load balancer -print("Ensuring that \"" + options.autoscaler + "\" is a valid autoscaler in the current region...") +logger.info("Ensuring that \"" + options.autoscaler + "\" is a valid autoscaler in the current region...") autoscaler = get_autoscaling_group(options.autoscaler) if autoscaler is False: - print("ERROR: '" + options.autoscaler + "' is NOT a valid autoscaler, exiting...") + logger.info("ERROR: '" + options.autoscaler + "' is NOT a valid autoscaler, exiting...") parser.print_usage() exit(1) @@ -710,79 +703,79 @@ def wait_for_complete_loadbalancer_autoscaler_attachment( loadbalancer_name, aut autoscaler_old_desired_capacity = int(autoscaler['DesiredCapacity']) # Check if we need to increase our max size -print("Checking if our current desired size is equal to our max size (if so we have to increase max size to deploy)...") +logger.info("Checking if our current desired size is equal to our max size (if so we have to increase max size to deploy)...") if autoscaler_old_max_size == autoscaler_old_desired_capacity: - print("Updating max size of autoscaler by one from " + str(autoscaler_old_max_size)) + logger.info("Updating max size of autoscaler by one from " + str(autoscaler_old_max_size)) if update_auto_scaling_group_max_size(options.autoscaler, (autoscaler_old_max_size + 1) ) is True: - print("Successfully expanded autoscalers max size temporarily for deployment...") + logger.info("Successfully expanded autoscalers max size temporarily for deployment...") else: - print("Failed expanding max-size, will be unable to deploy (until someone implements a different mechanism to deploy)") + logger.info("Failed expanding max-size, will be unable to deploy (until someone implements a different mechanism to deploy)") exit(1) # Letting the user know what this autoscaler is attached to... 
if len(autoscaler['LoadBalancerNames']) > 0: - print("This autoscaler is attached to the following Elastic Load Balancers (ELBs): ") + logger.info("This autoscaler is attached to the following Elastic Load Balancers (ELBs): ") for name in autoscaler['LoadBalancerNames']: - print(" ELB: " + name) + logger.info(" ELB: " + name) else: - print("This autoscaler is not attached to any ELBs") + logger.info("This autoscaler is not attached to any ELBs") if len(autoscaler['TargetGroupARNs']) > 0: - print("This autoscaler is attached to the following Target Groups (for ALBs): ") + logger.info("This autoscaler is attached to the following Target Groups (for ALBs): ") for name in autoscaler['TargetGroupARNs']: - print(" TG: " + name) + logger.info(" TG: " + name) else: - print("This autoscaler is not attached to any Target Groups") + logger.info("This autoscaler is not attached to any Target Groups") if (options.force): - print("ALERT: We are force-deploying so we're going to skip checking for and setting suspended processes...") + logger.info("ALERT: We are force-deploying so we're going to skip checking for and setting suspended processes...") resume_all_processes( options.autoscaler ) else: - print("Ensuring that we don't have certain suspended processes that we will need to proceed...") + logger.info("Ensuring that we don't have certain suspended processes that we will need to proceed...") required_processes = ['Terminate','Launch','HealthCheck','AddToLoadBalancer'] suspended = get_suspended_processes(autoscaler) succeed = True for process in required_processes: if process in suspended: - print("Error: This autoscaler currently has the required suspended process: " + process) + logger.info("Error: This autoscaler currently has the required suspended process: " + process) succeed = False if succeed == False: exit(1) # Suspending processes so things on an autoscaler can settle -print("Suspending processes so everything can settle on ELB/ALB/TGs: ") +logger.info("Suspending processes so everything can settle on ELB/ALB/TGs: ") suspend_new_processes = ['ScheduledActions', 'AlarmNotification', 'AZRebalance'] suspend_processes( options.autoscaler, suspend_new_processes ) -print("Waiting 3 seconds so the autoscaler can settle from the above change...") +logger.info("Waiting 3 seconds so the autoscaler can settle from the above change...") time.sleep(3) # Get our autoscaler info again... just-incase something changed on it before doing the below health-check logic... 
autoscaler = get_autoscaling_group(options.autoscaler) # Wait to have healthy == desired instances on the autoscaler -print("Ensuring that we have the right number of instances on the autoscaler") +logger.info("Ensuring that we have the right number of instances on the autoscaler") wait_for_autoscaler_to_have_healthy_desired_instances(autoscaler) # Only if we want to not force-deploy do we check if the instances get health on their respective load balancers/target groups if (not options.force): # Wait to have healthy instances on the load balancers if len(autoscaler['LoadBalancerNames']) > 0: - print("Ensuring that these instances are healthy on the load balancer(s)") + logger.info("Ensuring that these instances are healthy on the load balancer(s)") for name in autoscaler['LoadBalancerNames']: - print("Waiting for all instances to be healthy in " + name + "...") + logger.info("Waiting for all instances to be healthy in " + name + "...") wait_for_complete_loadbalancer_autoscaler_attachment( name, options.autoscaler ) # Wait to have healthy instances on the target groups if len(autoscaler['TargetGroupARNs']) > 0: - print("Ensuring that these instances are healthy on the target group(s)") + logger.info("Ensuring that these instances are healthy on the target group(s)") for name in autoscaler['TargetGroupARNs']: - print("Waiting for all instances to be healthy in " + name + "...") + logger.info("Waiting for all instances to be healthy in " + name + "...") wait_for_complete_targetgroup_autoscaler_attachment( name, options.autoscaler ) -print("====================================================") -print("Performing rollout...") -print("====================================================") +logger.info("====================================================") +logger.info("Performing rollout...") +logger.info("====================================================") # Get our autoscaler info _one_ last time, to make sure we have the instances that we'll be rolling out of service... autoscaler = get_autoscaling_group(options.autoscaler) @@ -790,10 +783,10 @@ def wait_for_complete_loadbalancer_autoscaler_attachment( loadbalancer_name, aut # Gather the instances we need to kill... instances_to_kill = get_autoscaler_healthy_instances(autoscaler) if options.checkifinstancesneedtobeterminated: - print("INFO: Checking if there are instances to skip") + logger.info("INFO: Checking if there are instances to skip") instances_to_skip = get_instances_to_skip(instances_to_kill, autoscaler) for instance in instances_to_skip: - print("DEBUG: Skiping instance " + instance['InstanceId']) + logger.info("Skiping instance " + instance['InstanceId']) instances_to_kill.remove(instance) # Keep a tally of current instances... 
@@ -802,15 +795,15 @@ def wait_for_complete_loadbalancer_autoscaler_attachment( loadbalancer_name, aut def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): output = [] for instance_array_one in array_one: - # print("Found " + instance_array_one['InstanceId'] + " in array one...") + # logger.info("Found " + instance_array_one['InstanceId'] + " in array one...") found = False for instance_array_two in array_two: if instance_array_two['InstanceId'] == instance_array_one['InstanceId']: - # print("Found " + instance_array_two['InstanceId'] + " in array two also") + # logger.info("Found " + instance_array_two['InstanceId'] + " in array two also") found = True if (not found): - # print("Did not find instance in array two, returning this...") + # logger.info("Did not find instance in array two, returning this...") output.append(instance_array_one) return output @@ -818,7 +811,7 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): # Increase our desired size by one so a new instance will be started (usually from a new launch configuration) # Don't increase desired capacity if there is no instance to kill if len(instances_to_kill) > 0: - print("Increasing desired capacity by one from " + str(autoscaler['DesiredCapacity']) + " to " + str(autoscaler['DesiredCapacity'] + 1)) + logger.info("Increasing desired capacity by one from " + str(autoscaler['DesiredCapacity']) + " to " + str(autoscaler['DesiredCapacity'] + 1)) set_desired_capacity( options.autoscaler, autoscaler['DesiredCapacity'] + 1 ) downscaled = False @@ -827,24 +820,24 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): for i, instance in enumerate(instances_to_kill): # Sleep a little bit every loop, just incase... - print("Sleeping for 3 seconds so the autoscaler can catch-up...") + logger.info("Sleeping for 3 seconds so the autoscaler can catch-up...") time.sleep(3) # This is used in the external "down" helper below, but we need to do this here before we start shutting down this instance old_instance_details = describe_instance(instance['InstanceId']) # Wait to have healthy == desired instances on the autoscaler - print("Ensuring that we have the right number of instances on the autoscaler") + logger.info("Ensuring that we have the right number of instances on the autoscaler") wait_for_autoscaler_to_have_healthy_desired_instances( options.autoscaler ) # Wait for new instances to spin up... while True: - print("Waiting for new instance(s) to spin up...") + logger.info("Waiting for new instance(s) to spin up...") # Lets figure out what the new instance ID(s) are here... new_current_instance_list = get_autoscaler_healthy_instances(options.autoscaler) new_instances = find_aws_instances_in_first_list_but_not_in_second(new_current_instance_list, current_instance_list) if len(new_instances) == 0: - print("There are no new instances yet... waiting 10 seconds...") + logger.info("There are no new instances yet... 
waiting 10 seconds...") time.sleep(10) else: break; @@ -853,21 +846,21 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): if (not options.skip): # Wait to have healthy instances on the load balancers if len(autoscaler['LoadBalancerNames']) > 0: - print("Ensuring that these instances are healthy on the load balancer(s)") + logger.info("Ensuring that these instances are healthy on the load balancer(s)") for name in autoscaler['LoadBalancerNames']: - print("Waiting for all instances to be healthy in " + name + "...") + logger.info("Waiting for all instances to be healthy in " + name + "...") wait_for_complete_loadbalancer_autoscaler_attachment( name, options.autoscaler ) # Wait to have healthy instances on the target groups if len(autoscaler['TargetGroupARNs']) > 0: - print("Ensuring that these instances are healthy on the target group(s)") + logger.info("Ensuring that these instances are healthy on the target group(s)") for name in autoscaler['TargetGroupARNs']: - print("Waiting for all instances to be healthy in " + name + "...") + logger.info("Waiting for all instances to be healthy in " + name + "...") wait_for_complete_targetgroup_autoscaler_attachment( name, options.autoscaler ) # Wait for instance to get healthy (custom handler) if desired... if (options.checkifnewserverisupcommand): - print("Running external health up check upon request...") + logger.info("Running external health up check upon request...") while True: succeeded_health_up_check = True # String replacing the instance ID and/or the instance IP address into the external script @@ -877,42 +870,42 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): private_ip_address = instance_details['PrivateIpAddress'] if 'PublicIpAddress' in instance_details: public_ip_address = instance_details['PublicIpAddress'] - print("Found new instance " + new_instance['InstanceId'] + " with private IP address " + private_ip_address + " and public IP " + public_ip_address) + logger.info("Found new instance " + new_instance['InstanceId'] + " with private IP address " + private_ip_address + " and public IP " + public_ip_address) else: - print("Found new instance " + new_instance['InstanceId'] + " with private IP address " + private_ip_address + " and NO public IP address") + logger.info("Found new instance " + new_instance['InstanceId'] + " with private IP address " + private_ip_address + " and NO public IP address") tmpcommand = str(options.checkifnewserverisupcommand) tmpcommand = tmpcommand.replace('NEW_INSTANCE_ID',new_instance['InstanceId']) tmpcommand = tmpcommand.replace('NEW_INSTANCE_PRIVATE_IP_ADDRESS', private_ip_address) if 'PublicIpAddress' in instance_details: tmpcommand = tmpcommand.replace('NEW_INSTANCE_PUBLIC_IP_ADDRESS', public_ip_address) - print("Executing external health shell command: " + tmpcommand) + logger.info("Executing external health shell command: " + tmpcommand) retval = call(tmpcommand, shell=True) # print "Got return value " + str(retval) if (retval != 0): succeeded_health_up_check = False except: - print("WARNING: Failed trying to figure out if new instance is healthy") + logger.info("WARNING: Failed trying to figure out if new instance is healthy") if succeeded_health_up_check: - print("SUCCESS: We are done checking instances with a custom command") + logger.info("SUCCESS: We are done checking instances with a custom command") break else: - print("FAIL: We are done checking instances with a custom command, but (at least one) has failed, re-trying in 10 
seconds...") + logger.info("FAIL: We are done checking instances with a custom command, but (at least one) has failed, re-trying in 10 seconds...") time.sleep(10) - print("Should de-register instance " + instance['InstanceId'] + " from ALB/ELBs if attached...") + logger.info("Should de-register instance " + instance['InstanceId'] + " from ALB/ELBs if attached...") # If we have load balancers... if len(autoscaler['LoadBalancerNames']) > 0: for name in autoscaler['LoadBalancerNames']: - print("De-registering " + instance['InstanceId'] + " from load balancer " + name + "...") + logger.info("De-registering " + instance['InstanceId'] + " from load balancer " + name + "...") deregister_instance_from_load_balancer( instance['InstanceId'], name ) # If we have target groups... if len(autoscaler['TargetGroupARNs']) > 0: for name in autoscaler['TargetGroupARNs']: - print("De-registering " + instance['InstanceId'] + " from target group " + name + "...") + logger.info("De-registering " + instance['InstanceId'] + " from target group " + name + "...") deregister_instance_from_target_group( instance['InstanceId'], name ) # If we have load balancers... @@ -920,13 +913,13 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): for name in autoscaler['LoadBalancerNames']: while True: instance_ids = get_instance_ids_of_load_balancer( name ) - print("Got instance ids...") - pprint(instance_ids) + logger.info("Got instance ids...") + logger.info(instance_ids) if instance['InstanceId'] in instance_ids: - print("Instance ID is still in load balancer, sleeping for 10 seconds...") + logger.info("Instance ID is still in load balancer, sleeping for 10 seconds...") time.sleep(10) else: - print("Instance ID is removed from load balancer, continuing...") + logger.info("Instance ID is removed from load balancer, continuing...") break # If we have target groups... @@ -935,15 +928,15 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): while True: instance_ids = get_instance_ids_of_target_group( name ) if instance['InstanceId'] in instance_ids: - print("Instance ID is still in target group, sleeping for 10 seconds...") + logger.info("Instance ID is still in target group, sleeping for 10 seconds...") time.sleep(10) else: - print("Instance ID is removed from target group, continuing...") + logger.info("Instance ID is removed from target group, continuing...") break # Run a command on server going down, if desired... 
if (options.runbeforeserverdowncommand): - print("Running external server down command...") + logger.info("Running external server down command...") # String replacing the instance ID and/or the instance IP address into the external script old_private_ip_address = old_instance_details['PrivateIpAddress'] if 'PublicIpAddress' in old_instance_details: @@ -954,15 +947,15 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): tmpcommand = tmpcommand.replace('OLD_INSTANCE_PRIVATE_IP_ADDRESS', old_private_ip_address) if 'PublicIpAddress' in old_instance_details: tmpcommand = tmpcommand.replace('OLD_INSTANCE_PUBLIC_IP_ADDRESS', old_public_ip_address) - print("Executing before server down command: " + tmpcommand) + logger.info("Executing before server down command: " + tmpcommand) retval = call(tmpcommand, shell=True) # print "Got return value " + str(retval) if (retval != 0): - print("WARNING: Server down command returned retval of " + str(retval)) + logger.info("WARNING: Server down command returned retval of " + str(retval)) # If the user specified they want to wait if (options.waitforseconds > 0): - print("User requested to wait for {0} before terminating instances...".format(options.waitforseconds)) + logger.info("User requested to wait for {0} before terminating instances...".format(options.waitforseconds)) time.sleep(options.waitforseconds) # Re-get our current instance list, for the custom health check script @@ -980,7 +973,7 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): # Run a command on server going down, if desired... if (options.runafterserverdowncommand): - print("Running external server down command after...") + logger.info("Running external server down command after...") time.sleep(2) # String replacing the instance ID and/or the instance IP address into the external script old_private_ip_address = old_instance_details['PrivateIpAddress'] @@ -992,53 +985,53 @@ def find_aws_instances_in_first_list_but_not_in_second( array_one, array_two ): tmpcommand = tmpcommand.replace('OLD_INSTANCE_PRIVATE_IP_ADDRESS', old_private_ip_address) if 'PublicIpAddress' in old_instance_details: tmpcommand = tmpcommand.replace('OLD_INSTANCE_PUBLIC_IP_ADDRESS', old_public_ip_address) - print("Executing after server down command: " + tmpcommand) + logger.info("Executing after server down command: " + tmpcommand) retval = call(tmpcommand, shell=True) # print "Got return value " + str(retval) if (retval != 0): - print("WARNING: Server down command returned retval of " + str(retval)) + logger.info("WARNING: Server down command returned retval of " + str(retval)) instances_to_kill_flat = flatten_instance_health_array_from_loadbalancer( instances_to_kill ) # Before exiting, just incase lets wait for proper detachment of the Classic ELBs (wait for: idle timeout / connection draining to finish) if (not options.force): if len(autoscaler['LoadBalancerNames']) > 0: - print("Ensuring that these instances are fully detached from the load balancer(s)") + logger.info("Ensuring that these instances are fully detached from the load balancer(s)") for name in autoscaler['LoadBalancerNames']: - print("Waiting for complete detachment of old instances from load balancer '" + name + "'...") + logger.info("Waiting for complete detachment of old instances from load balancer '" + name + "'...") wait_for_instances_to_detach_from_loadbalancer( instances_to_kill_flat, name ) # Before exiting, just incase lets wait for proper detachment of the TGs (wait for: idle timeout 
/ connection draining to finish) if len(autoscaler['TargetGroupARNs']) > 0: - print("Ensuring that these instances are fully detached from the target group(s)") + logger.info("Ensuring that these instances are fully detached from the target group(s)") for name in autoscaler['TargetGroupARNs']: - print("Waiting for complete detachment of old instances from target group '" + name + "'...") + logger.info("Waiting for complete detachment of old instances from target group '" + name + "'...") wait_for_instances_to_detach_from_target_group( instances_to_kill_flat, name ) # This should never happen unless the above for loop breaks out unexpectedly if downscaled == False: - print("Manually decreasing desired capacity back to " + str(autoscaler_old_desired_capacity)) + logger.info("Manually decreasing desired capacity back to " + str(autoscaler_old_desired_capacity)) set_desired_capacity( options.autoscaler, autoscaler_old_desired_capacity ) # Resume our processes... if (options.force): - print("ALERT: Resuming all autoscaling processes because of --force...") + logger.info("ALERT: Resuming all autoscaling processes because of --force...") resume_all_processes( options.autoscaler ) else: - print("Resuming suspended processes...") + logger.info("Resuming suspended processes...") resume_processes(options.autoscaler, suspend_new_processes) # Check if we need to decrease our max size back to what it was -print("Checking if we changed our max size, if so, shrink it again...") +logger.info("Checking if we changed our max size, if so, shrink it again...") if autoscaler_old_max_size == autoscaler_old_desired_capacity: - print("Updating max size of autoscaler down one to " + str(autoscaler_old_max_size)) + logger.info("Updating max size of autoscaler down one to " + str(autoscaler_old_max_size)) if update_auto_scaling_group_max_size(options.autoscaler, autoscaler_old_max_size ) is True: - print("Successfully shrunk autoscalers max size back to its old value") + logger.info("Successfully shrunk autoscalers max size back to its old value") else: - print("Failed shrinking max-size for some reason") + logger.info("Failed shrinking max-size for some reason") exit(1) else: - print("Didn't need to shrink our max size") + logger.info("Didn't need to shrink our max size") -print("Successfully zero-downtime deployed!") +logger.info("Successfully zero-downtime deployed!") exit(0)
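
One thing the patch leaves open: every message goes through logger.info(), with the old "ERROR:"/"WARNING:"/"DEBUG:" prefixes kept inside the message text, and the explicit logger.setLevel(logging.DEBUG) means records from this logger are emitted regardless of what LOG_LEVEL resolves to in basicConfig. A minimal sketch of a possible follow-up, assuming LOG_LEVEL is meant to control filtering and the prefixes are meant to become real severities (the instance ID and group name below are illustrative placeholders, not values from the script):

    import logging
    import os

    # Resolve the level once from the environment; default to INFO.
    LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO').upper()
    logging.basicConfig(level=LOG_LEVEL, format='%(levelname)s: %(asctime)s: %(message)s')
    logger = logging.getLogger(__name__)
    # No logger.setLevel() call here, so LOG_LEVEL alone decides what gets emitted.

    # Severity-specific calls make the hand-written "ERROR:"/"DEBUG:" prefixes redundant,
    # and %s-style arguments let the logging module do the string formatting.
    logger.debug("Terminating instance '%s' from the autoscaling group '%s'...",
                 "i-0123456789abcdef0", "example-asg")
    logger.warning("We are currently performing some autoscaling, we should wait...")
    logger.error("Unable to set_desired_capacity on '%s'", "example-asg")

In the script itself these calls would take the existing variables (instance_id, autoscaling_group_name, and so on) in place of the placeholder strings.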