Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def allowed_fields(self):
    """Return the fields accepted by a Map state.

    Beyond the fields shared by most states, a Map state also accepts
    ItemsPath, Iterator, and MaxConcurrency.
    """
    shared = [Field.Comment, Field.InputPath, Field.OutputPath,
              Field.Parameters, Field.ResultPath, Field.Retry, Field.Catch]
    map_only = [Field.ItemsPath, Field.Iterator, Field.MaxConcurrency]
    return shared + map_only
def allowed_fields(self):
    """Return the fields accepted at the state-machine (graph) level."""
    return [Field.TimeoutSeconds, Field.Comment, Field.Version]
* (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
:class:`sagemaker.amazon.amazon_estimator.RecordSet` objects,
where each instance is a different channel of training data.
hyperparameters (dict, optional): Specify the hyper parameters for the training. (Default: None)
mini_batch_size (int): Specify this argument only when estimator is a built-in estimator of an Amazon algorithm. For other estimators, batch size should be specified in the estimator.
experiment_config (dict, optional): Specify the experiment config for the training. (Default: None)
wait_for_completion (bool, optional): Boolean value set to `True` if the Task state should wait for the training job to complete before proceeding to the next step in the workflow. Set to `False` if the Task state should submit the training job and proceed to the next step. (default: True)
tags (list[dict], optional): List of tags to associate with the resource.
"""
self.estimator = estimator
self.job_name = job_name
if wait_for_completion:
kwargs[Field.Resource.value] = 'arn:aws:states:::sagemaker:createTrainingJob.sync'
else:
kwargs[Field.Resource.value] = 'arn:aws:states:::sagemaker:createTrainingJob'
if isinstance(job_name, str):
parameters = training_config(estimator=estimator, inputs=data, job_name=job_name, mini_batch_size=mini_batch_size)
else:
parameters = training_config(estimator=estimator, inputs=data, mini_batch_size=mini_batch_size)
if estimator.debugger_hook_config != None:
parameters['DebugHookConfig'] = estimator.debugger_hook_config._to_request_dict()
if estimator.rules != None:
parameters['DebugRuleConfigurations'] = [rule.to_debugger_rule_config_dict() for rule in estimator.rules]
if isinstance(job_name, (ExecutionInput, StepInput)):
parameters['TrainingJobName'] = job_name
if hyperparameters is not None:
def to_dict(self):
    """Serialize this Parallel state, expanding each branch into its own graph dict."""
    serialized = super(Parallel, self).to_dict()
    branch_dicts = []
    for branch in self.branches:
        branch_dicts.append(Graph(branch).to_dict())
    serialized[Field.Branches.name] = branch_dicts
    return serialized
def allowed_fields(self):
    """Return the fields accepted by a Fail state (no paths or retriers)."""
    return [Field.Comment, Field.Error, Field.Cause]
def allowed_fields(self):
    """Return the fields accepted by this state type.

    Includes the common path/parameter fields plus Retry and Catch.
    """
    permitted = [Field.Comment, Field.InputPath, Field.OutputPath,
                 Field.Parameters, Field.ResultPath]
    permitted += [Field.Retry, Field.Catch]
    return permitted
def allowed_fields(self):
    """Return the fields accepted by a Map state.

    Adds the Map-specific ItemsPath, Iterator, and MaxConcurrency fields
    on top of the common path, retry, and catch fields.
    """
    return [Field.Comment, Field.InputPath, Field.OutputPath,
            Field.Parameters, Field.ResultPath, Field.Retry, Field.Catch,
            Field.ItemsPath, Field.Iterator, Field.MaxConcurrency]
def to_dict(self):
    """Serialize this state, wiring in its transition, retriers, and catchers.

    Emits either a ``Next`` pointer or an ``End`` marker (except for the
    terminal state types, which carry neither), then appends Retry/Catch
    entries when the state type allows them.
    """
    serialized = super(State, self).to_dict()
    # Transition: explicit Next when a successor exists; otherwise mark End,
    # except on state types that terminate implicitly.
    if self.next_step is None:
        if self.state_type not in ('Succeed', 'Fail', 'Choice'):
            serialized[Field.End.name] = True
    else:
        serialized[Field.Next.name] = self.next_step.state_id
    # Retriers and catchers are only serialized where the field is permitted.
    if self.retries and self.is_field_allowed(Field.Retry.value):
        serialized[Field.Retry.name] = [retrier.to_dict() for retrier in self.retries]
    if self.catches and self.is_field_allowed(Field.Catch.value):
        serialized[Field.Catch.name] = [catcher.to_dict() for catcher in self.catches]
    return serialized
def allowed_fields(self):
    """Return the fields accepted by a Map state."""
    fields = [Field.Comment, Field.InputPath, Field.OutputPath,
              Field.Parameters, Field.ResultPath, Field.Retry, Field.Catch]
    # Map-only fields controlling iteration over the input array.
    fields.extend([Field.ItemsPath, Field.Iterator, Field.MaxConcurrency])
    return fields
def allowed_fields(self):
    """Return the fields accepted by this state type (paths and parameters only)."""
    return [Field.Comment, Field.InputPath, Field.OutputPath,
            Field.Parameters, Field.ResultPath]