            params[p] = module.params.get(p)  # inside a loop that copies the optional module params

    if d_b_name:
        params['d_b_name'] = d_b_name

    try:
        redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
        changed = False
    except is_boto3_error_code('ClusterNotFound'):
        try:
            redshift.create_cluster(ClusterIdentifier=identifier,
                                    NodeType=node_type,
                                    MasterUsername=username,
                                    MasterUserPassword=password,
                                    **snake_dict_to_camel_dict(params, capitalize_first=True))
        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
            module.fail_json_aws(e, msg="Failed to create cluster")
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg="Failed to describe cluster")

    if wait:
        attempts = wait_timeout // 60
        waiter = redshift.get_waiter('cluster_available')
        try:
            waiter.wait(
                ClusterIdentifier=identifier,
                WaiterConfig=dict(MaxAttempts=attempts)
            )
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            module.fail_json_aws(e, msg="Timeout waiting for the cluster creation")

    try:
        resource = redshift.describe_clusters(ClusterIdentifier=identifier)['Clusters'][0]
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        module.fail_json_aws(e, msg="Failed to describe cluster")
def _force_detach_volume(volume):
    log.info("Force detaching all volume attachments.")
    for attachment in volume.attachments:
        try:
            log.info("Volume has attachment: {}".format(attachment))
            log.info("Detaching volume from instance: {}".format(attachment['InstanceId']))
            volume.detach_from_instance(
                DryRun=False,
                InstanceId=attachment['InstanceId'],
                Device=attachment['Device'],
                Force=True)
        except exceptions.ClientError as exc:
            log.exception("Failed to detach volume")
            # See the following link for the structure of the exception:
            # https://github.com/boto/botocore/blob/4d4c86b2bdd4b7a8e110e02abd4367f07137ca47/botocore/exceptions.py#L346
            err_message = exc.response['Error']['Message']
            err_code = exc.response['Error']['Code']
            # See the following link for details of the error message:
            # https://jira.mesosphere.com/browse/DCOS-37441?focusedCommentId=156163&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-156163
            available_msg = "is in the 'available' state"
            if err_code == 'IncorrectState' and available_msg in err_message:
                log.info("Ignoring benign exception")
                return
            raise
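
# The helper above keys its handling off exc.response['Error']['Code'] and
# exc.response['Error']['Message']. A minimal sketch of the same pattern in
# isolation, with a plain boto3 client instead of a Volume resource; the
# volume ID is a placeholder, and the IncorrectState/'available' check simply
# mirrors the snippet above:
import boto3
import botocore.exceptions

ec2 = boto3.client("ec2")

try:
    ec2.detach_volume(VolumeId="vol-0123456789abcdef0", Force=True)
except botocore.exceptions.ClientError as exc:
    # botocore packs the service error into exc.response["Error"]
    code = exc.response["Error"]["Code"]
    message = exc.response["Error"]["Message"]
    if code == "IncorrectState" and "is in the 'available' state" in message:
        pass  # volume is already detached; nothing to do
    else:
        raise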
def run_simulation(session, roles, rolename, rolechecks):
    iamc = session.client('iam')
    errormsg = []
    with ActionOnExit('Checking role {rolename}..', **vars()) as act:
        for checkname, checkoptions in sorted(rolechecks.items()):
            try:
                result = iamc.simulate_custom_policy(PolicyInputList=[json.dumps(roles[rolename]['policy'])],
                                                     **checkoptions['simulation_options'])
            except botocore.exceptions.ClientError as e:
                act.fatal_error(e)

            results = result['EvaluationResults']
            while result.get('IsTruncated', False):
                result = iamc.simulate_custom_policy(Marker=result['Marker'],
                                                     PolicyInputList=[json.dumps(roles[rolename]['policy'])],
                                                     **checkoptions['simulation_options'])
                results.extend(result['EvaluationResults'])

            for result in results:
                if result['EvalDecision'] != checkoptions['simulation_result']:
                    errormsg.append('[{}] {} is {} and NOT {}'.format(checkname,
                                                                      result['EvalActionName'],
                                                                      result['EvalDecision'],
                                                                      checkoptions['simulation_result']))
        if len(errormsg):
            act.error('mismatch')
def get_stack_facts(cfn, stack_name):
    try:
        stack_response = describe_stacks(cfn, stack_name)
        stack_info = stack_response['Stacks'][0]
    except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
        error_msg = boto_exception(err)
        if 'does not exist' in error_msg:
            # missing stack, don't bail.
            return None
        # other error, bail.
        raise err

    if stack_response and stack_response.get('Stacks', None):
        stacks = stack_response['Stacks']
        if len(stacks):
            stack_info = stacks[0]

    return stack_info
def run(self, terms, variables=None, **kwargs):
    if isinstance(terms, str):
        terms = [terms]

    subnet_ids = []
    region = terms[0][0]
    subnet_names = terms[0][1]
    session = boto3.session.Session(region_name=region)

    try:
        ec2_client = session.client('ec2')
    except botocore.exceptions.NoRegionError:
        raise AnsibleError("AWS region not specified.")

    subnet_filter = [{'Name': 'tag:Name', 'Values': subnet_names}]
    result = ec2_client.describe_subnets(Filters=subnet_filter)
    subnets = result.get('Subnets')
    if subnets:
        for subnet in subnets:
            # SubnetId is already a text string; no .encode() needed on Python 3
            subnet_ids.append(subnet.get('SubnetId'))

    return subnet_ids
def _stack_exists(stack_name):
    cloudformation = boto3.client('cloudformation')
    try:
        response = cloudformation.describe_stacks(
            StackName=stack_name
        )
        stacks = response['Stacks']
    except (KeyError, botocore.exceptions.ClientError):
        return False

    for stack in stacks:
        if stack['StackStatus'] == 'DELETE_COMPLETE':
            continue
        if stack_name == stack['StackName']:
            return True

    return False
def delete_pipeline(client, name, module):
    try:
        resp = client.delete_pipeline(name=name)
        return resp
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except botocore.exceptions.BotoCoreError as e:
        module.fail_json(msg="Unable to delete pipeline {0}: {1}".format(name, to_native(e)),
                         exception=traceback.format_exc())
    try:
        response = client.create_snapshot(
            DryRun=False,
            VolumeId=volume_id,
            Description="EC2 Rescue for Linux Created Snapshot for " + volume_id + " at " +
                        datetime.datetime.utcnow().strftime("%Y/%m/%d %H-%M-%S")
        )
        ec2rlcore.dual_log("Creating snapshot " + response["SnapshotId"] + " for volume " + volume_id)
        if describe_snapshot_status(response["SnapshotId"]) == "error":
            raise BackupSnapshotError
        del client
    except botocore.exceptions.NoCredentialsError:
        raise BackupNoCredsError
    except botocore.exceptions.ClientError as error:
        raise BackupClientError(error.response)
    except TypeError:
        raise BackupSpecificationError
    return response["SnapshotId"]
{"c": {"region": "f"}}}
Note that the "foo" key comes from A, even though it's defined in both
FileA and FileB. Because "foo" was defined in FileA first, then the values
for "foo" from FileA are used and the values for "foo" from FileB are
ignored. Also note where the profiles originate from. Profile "a"
comes FileA, profile "b" comes from FileB, and profile "c" comes
from FileC.
"""
configs = []
profiles = []
for filename in filenames:
try:
loaded = load_config(filename)
except botocore.exceptions.ConfigNotFound:
continue
profiles.append(loaded.pop('profiles'))
configs.append(loaded)
merged_config = _merge_list_of_dicts(configs)
merged_profiles = _merge_list_of_dicts(profiles)
merged_config['profiles'] = merged_profiles
return merged_config
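
# The docstring above describes first-file-wins merging, but the helper it
# relies on is not shown here. A minimal sketch of what _merge_list_of_dicts
# could look like under that assumption (an illustration, not necessarily the
# library's actual implementation):
def _merge_list_of_dicts(list_of_dicts):
    merged_dicts = {}
    for single_dict in list_of_dicts:
        for key, value in single_dict.items():
            # Earlier dicts take precedence, so never overwrite an existing key.
            merged_dicts.setdefault(key, value)
    return merged_dicts


# Example matching the docstring: "foo" keeps the value from the first dict.
# _merge_list_of_dicts([{'foo': 1, 'a': 2}, {'foo': 9, 'b': 3}])
# -> {'foo': 1, 'a': 2, 'b': 3}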
def run(self, terms, variables=None, **kwargs):
    region = terms[0][0]
    instance_name = terms[0][1]
    session = boto3.session.Session(region_name=region)

    try:
        rds_client = session.client('rds')
    except botocore.exceptions.NoRegionError:
        raise AnsibleError("AWS region not specified.")

    result = rds_client.describe_db_instances(DBInstanceIdentifier=instance_name)
    if result and result.get('DBInstances'):
        # Endpoint.Port is returned by boto3 as an integer, so convert it to
        # text instead of calling .encode() on it.
        return [str(result['DBInstances'][0]['Endpoint']['Port'])]
    return None