Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
projection_expression = "#attr"
expression_attribute_names = {"#attr": operator_name}
try:
table = dynamo_resource.Table(table_name)
asset_item = table.get_item(
Key={
'AssetId': asset_id
},
ProjectionExpression=projection_expression,
ExpressionAttributeNames=expression_attribute_names
)
except ClientError as e:
error = e.response['Error']['Message']
logger.error(
"Exception occurred while retreiving metadata for {asset}: {e}".format(asset=asset_id, e=error))
raise ChaliceViewError("Unable to retrieve metadata: {e}".format(e=error))
except Exception as e:
logger.error(
"Exception occurred while retreiving metadata for {asset}: {e}".format(asset=asset_id, e=e))
raise ChaliceViewError("Unable to retrieve metadata: {e}".format(e=e))
else:
if "Item" in asset_item:
pointer = asset_item["Item"][operator_name][0]["pointer"]
s3_object = read_metadata_from_s3(dataplane_s3_bucket, pointer)
# TODO: Add error handling for s3 call
operator_metadata = json.loads(s3_object["Object"])
if is_metadata_list(operator_metadata) is True:
first_page_num = 0
next_page_num = first_page_num + 1
first_page_data = operator_metadata[first_page_num]
def process_lambda_output(output):
    """Return the processed lambda output, raising errors if found.

    Produces a {"Message": ...} dict when the lambda reported a response.
    Raises BadRequestError when the lambda reported an errorMessage, and
    ChaliceViewError for any other (unexpected) output shape.
    """
    response = output.get('response')
    if response:
        return {"Message": response}
    error_message = output.get('errorMessage')
    if error_message:
        raise BadRequestError(error_message)
    # Neither a response nor an errorMessage key was present/non-empty.
    raise ChaliceViewError('unexpected output '
                           'from lambda call: %s' % str(output))
# Ignore the ConditionalCheckFailedException, bubble up
# other exceptions.
if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
raise ConflictError("Workflow with Name {} already exists".format(workflow["Name"]))
else:
raise
except Exception as e:
if "StateMachineArn" in workflow:
response = SFN_CLIENT.delete_state_machine(
workflow["StateMachineArn"]
)
logger.info("Exception {}".format(e))
workflow = None
raise ChaliceViewError("Exception '%s'" % e)
return workflow
# }
)
except ClientError as e:
# Ignore the ConditionalCheckFailedException, bubble up
# other exceptions.
if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
raise ConflictError("Workflow with Name {} already exists".format(workflow["Name"]))
else:
raise
except Exception as e:
logger.info("Exception {}".format(e))
workflow = None
raise ChaliceViewError("Exception '%s'" % e)
return workflow
Key={
'Name': operation["Name"]
}
)
raise
except ConflictError as e:
logger.error ("got CoonflictError: {}".format (e))
raise
except ValidationError as e:
logger.error("got bad request error: {}".format(e))
raise BadRequestError(e)
except Exception as e:
logger.info("Exception {}".format(e))
operation = None
raise ChaliceViewError("Exception '%s'" % e)
logger.info("end create_operation: {}".format(json.dumps(operation, cls=DecimalEncoder)))
return operation
stage["Version"] = "v0"
stage["Id"] = str(uuid.uuid4())
stage["Created"] = str(datetime.now().timestamp())
stage["ResourceType"] = "STAGE"
stage["ApiVersion"] = API_VERSION
stage_table.put_item(Item=stage)
except ValidationError as e:
logger.error("got bad request error: {}".format(e))
raise BadRequestError(e)
except Exception as e:
logger.info("Exception {}".format(e))
stage = None
raise ChaliceViewError("Exception '%s'" % e)
return stage
print(workflow)
# Override the default configuration with Configuration key-value pairs that are input to the
# /workflow/execution API. Update only keys that are passed in, leaving the
# defaults for any key that is not specified
for stage, sconfig in Configuration.items():
if stage in workflow["Stages"]:
for operation, oconfig in sconfig.items():
if operation in workflow["Stages"][stage]["Configuration"]:
for key, value in oconfig.items():
workflow["Stages"][stage]["Configuration"][operation][key] = value
else:
workflow_execution["Workflow"] = None
raise ChaliceViewError("Exception: Invalid operation '%s'" % operation)
else:
workflow_execution["Workflow"] = None
raise ChaliceViewError("Exception: Invalid stage found in Configuration '%s'" % stage)
for stage in workflow["Stages"]:
workflow["Stages"][stage]["Status"] = awsmie.STAGE_STATUS_NOT_STARTED
workflow["Stages"][stage]["Metrics"] = {}
workflow["Stages"][stage]["Name"] = stage
workflow["Stages"][stage]["AssetId"] = asset_id
workflow["Stages"][stage]["WorkflowExecutionId"] = workflow_execution["Id"]
if "MetaData" not in workflow["Stages"][stage]:
workflow["Stages"][stage]["MetaData"] = {}
workflow_execution["Workflow"] = workflow
# initialize top level workflow_execution state from the workflow
workflow_execution["Status"] = awsmie.WORKFLOW_STATUS_QUEUED
workflow_execution["CurrentStage"] = current_stage = workflow["StartAt"]
# TODO: Do I want to add another check here?
cursor = app.current_request.query_params['cursor']
first_call = False
if first_call is True:
try:
table = dynamo_resource.Table(table_name)
asset_item = table.get_item(
Key={
'AssetId': asset_id
}
)
except ClientError as e:
error = e.response['Error']['Message']
logger.error("Exception occurred while retreiving metadata for {asset}: {e}".format(asset=asset_id, e=error))
raise ChaliceViewError("Unable to retrieve metadata: {e}".format(e=error))
except Exception as e:
logger.error(
"Exception occurred while retreiving metadata for {asset}: {e}".format(asset=asset_id, e=e))
raise ChaliceViewError("Unable to retrieve metadata: {e}".format(e=e))
else:
# TODO: Should clarify variable names for "first page" in the context of the
# entire request vs. a page for a specific operator
if "Item" in asset_item:
asset_attributes = asset_item["Item"]
global_attributes = ['S3Key', 'S3Bucket', 'AssetId', 'Created']
remaining_attributes = list(set(asset_attributes.keys()) - set(global_attributes))
remaining = []
global_asset_info = {}
for attr in global_attributes: