mx = MXNet(entry_point=SCRIPT_PATH,
           role='SageMakerRole',
           train_instance_count=instance_count,
           train_instance_type=instance_type,
           sagemaker_session=sagemaker_session,
           image_name=ecr_image,
           hyperparameters=hyperparameters)

with timeout(minutes=15):
    prefix = 'mxnet_mnist/{}'.format(utils.sagemaker_timestamp())
    train_input = mx.sagemaker_session.upload_data(path=os.path.join(DATA_PATH, 'train'),
                                                   key_prefix=prefix + '/train')
    test_input = mx.sagemaker_session.upload_data(path=os.path.join(DATA_PATH, 'test'),
                                                  key_prefix=prefix + '/test')

    job_name = utils.unique_name_from_base('test-mxnet-image')
    mx.fit({'train': train_input, 'test': test_input}, job_name=job_name)
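# The `timeout` context manager used throughout these snippets comes from the
# test utilities; its implementation is not shown here. A minimal sketch of
# the assumed behavior (POSIX-only, using SIGALRM):
import signal
from contextlib import contextmanager

@contextmanager
def timeout(minutes=0, seconds=0):
    limit = minutes * 60 + seconds

    def _handler(signum, frame):
        raise TimeoutError('block exceeded {} seconds'.format(limit))

    old_handler = signal.signal(signal.SIGALRM, _handler)
    signal.alarm(limit)
    try:
        yield
    finally:
        signal.alarm(0)  # cancel the pending alarm
        signal.signal(signal.SIGALRM, old_handler)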
trial_name = "pytorch-container-integ-test-{}".format(int(time.time()))
trial = Trial.create(
    experiment_name=experiment_name, trial_name=trial_name, sagemaker_boto_client=sm_client
)
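# `Trial.create` above assumes an experiment already exists. With the
# sagemaker-experiments package it would typically be created first; a
# hedged sketch (names mirror the variables above):
import time

from smexperiments.experiment import Experiment

experiment_name = "pytorch-container-integ-test-{}".format(int(time.time()))
experiment = Experiment.create(
    experiment_name=experiment_name, sagemaker_boto_client=sm_client
)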
hyperparameters = {
    "random_seed": True,
    "num_steps": 50,
    "smdebug_path": "/opt/ml/output/tensors",
    "epochs": 1,
    "data_dir": training_dir,
}
training_job_name = utils.unique_name_from_base("test-pytorch-experiments-image")

# create a training job and wait for it to complete
with timeout(minutes=DEFAULT_TIMEOUT):
    pytorch = PyTorch(
        entry_point=smdebug_mnist_script,
        role="SageMakerRole",
        train_instance_count=1,
        train_instance_type=instance_type,
        sagemaker_session=sagemaker_session,
        image_name=ecr_image,
        hyperparameters=hyperparameters,
    )
    training_input = pytorch.sagemaker_session.upload_data(
        path=training_dir, key_prefix="pytorch/mnist"
    )
    pytorch.fit({"training": training_input}, job_name=training_job_name)
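# Once the job finishes, the tensors written under `smdebug_path` can be
# inspected with the smdebug library. A minimal sketch; `s3_output_path` is
# a hypothetical variable pointing at wherever the job persisted the tensors.
from smdebug.trials import create_trial

smdebug_trial = create_trial(s3_output_path)
print(smdebug_trial.tensor_names())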
mx = MXNet(entry_point=SCRIPT_PATH,
           role='SageMakerRole',
           train_instance_count=instance_count,
           train_instance_type=instance_type,
           sagemaker_session=sagemaker_session,
           image_name=ecr_image,
           hyperparameters=hyperparameters)

with timeout(minutes=15):
    prefix = 'mxnet_mnist_gluon_basic_hook_demo/{}'.format(utils.sagemaker_timestamp())
    train_input = mx.sagemaker_session.upload_data(path=os.path.join(DATA_PATH, 'train'),
                                                   key_prefix=prefix + '/train')
    test_input = mx.sagemaker_session.upload_data(path=os.path.join(DATA_PATH, 'test'),
                                                  key_prefix=prefix + '/test')

    job_name = utils.unique_name_from_base('test-mxnet-image')
    mx.fit({'train': train_input, 'test': test_input}, job_name=job_name)
def _test_dist_operations(sagemaker_session, image_uri, instance_type, dist_backend, train_instance_count=3):
    with timeout(minutes=DEFAULT_TIMEOUT):
        pytorch = PyTorch(entry_point=dist_operations_path,
                          role='SageMakerRole',
                          train_instance_count=train_instance_count,
                          train_instance_type=instance_type,
                          sagemaker_session=sagemaker_session,
                          image_name=image_uri,
                          debugger_hook_config=False,
                          hyperparameters={'backend': dist_backend})

        # ensure the default bucket exists before uploading
        pytorch.sagemaker_session.default_bucket()
        fake_input = pytorch.sagemaker_session.upload_data(path=dist_operations_path,
                                                           key_prefix='pytorch/distributed_operations')

        job_name = utils.unique_name_from_base('test-pytorch-dist-ops')
        pytorch.fit({'required_argument': fake_input}, job_name=job_name)
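# A hypothetical invocation of the helper above, assuming pytest fixtures
# supply the session, image URI, and instance type; 'gloo' is an
# illustrative backend choice:
_test_dist_operations(sagemaker_session, ecr_image, 'ml.c4.xlarge', 'gloo')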
def test_hosting(sagemaker_session, image_uri, instance_type, framework_version):
    prefix = 'mxnet-serving/default-handlers'
    model_data = sagemaker_session.upload_data(path=MODEL_PATH, key_prefix=prefix)

    model = MXNetModel(model_data,
                       'SageMakerRole',
                       SCRIPT_PATH,
                       image=image_uri,
                       framework_version=framework_version,
                       sagemaker_session=sagemaker_session)

    endpoint_name = utils.unique_name_from_base('test-mxnet-serving')
    with timeout.timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        predictor = model.deploy(1, instance_type, endpoint_name=endpoint_name)

        output = predictor.predict([[1, 2]])
        assert [[4.9999918937683105]] == output
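# While the endpoint is up, it can also be invoked directly through boto3.
# A sketch; the JSON content type is an assumption about the default MXNet
# serving handlers.
import json

import boto3

runtime = boto3.client('sagemaker-runtime')
response = runtime.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType='application/json',
    Body=json.dumps([[1, 2]]),
)
print(json.loads(response['Body'].read()))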
def _upload_code(self, key_prefix, repack=False):
    """Upload the entry point and supporting code to S3.

    Args:
        key_prefix (str): S3 key prefix under which the code is uploaded.
        repack (bool): Whether to repack the model artifact so the
            inference code is bundled inside it.
    """
    local_code = utils.get_config_value("local.local_code", self.sagemaker_session.config)
    if self.sagemaker_session.local_mode and local_code:
        self.uploaded_code = None
    elif not repack:
        bucket = self.bucket or self.sagemaker_session.default_bucket()
        self.uploaded_code = fw_utils.tar_and_upload_dir(
            session=self.sagemaker_session.boto_session,
            bucket=bucket,
            s3_key_prefix=key_prefix,
            script=self.entry_point,
            directory=self.source_dir,
            dependencies=self.dependencies,
        )

    if repack:
        bucket = self.bucket or self.sagemaker_session.default_bucket()
        repacked_model_data = "s3://" + os.path.join(bucket, key_prefix, "model.tar.gz")

        utils.repack_model(
            inference_script=self.entry_point,
            source_directory=self.source_dir,
            dependencies=self.dependencies,
            model_uri=self.model_data,
            repacked_model_uri=repacked_model_data,
            sagemaker_session=self.sagemaker_session,
            kms_key=self.model_kms_key,
        )

        self.repacked_model_data = repacked_model_data
        self.uploaded_code = UploadedCode(
            s3_prefix=self.repacked_model_data, script_name=os.path.basename(self.entry_point)
        )
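# One detail worth noting in the repack branch: os.path.join is
# platform-dependent, so building an S3 URI with it only yields '/'
# separators on POSIX hosts. A safer equivalent for S3 keys:
import posixpath

repacked_model_data = "s3://" + posixpath.join(bucket, key_prefix, "model.tar.gz")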
def create_model(
    self,
    role=None,
    vpc_config_override=VPC_CONFIG_DEFAULT,
    entry_point=None,
    source_dir=None,
    dependencies=None,
    **kwargs
):
    """Create a ``Model`` for deploying the artifacts of this estimator's most recent training job."""
    return Model(
        model_data=self.model_data,
        role=role,
        image=self.image_name,
        name=self._current_job_name,
        container_log_level=self.container_log_level,
        framework_version=utils.get_short_version(self.framework_version),
        sagemaker_session=self.sagemaker_session,
        vpc_config=self.get_vpc_config(vpc_config_override),
        entry_point=entry_point,
        source_dir=source_dir,
        dependencies=dependencies,
        enable_network_isolation=self.enable_network_isolation(),
        **kwargs
    )
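# A hedged usage sketch: the Model returned above can be deployed to a
# real-time endpoint. `estimator` and the instance settings are
# illustrative values, not part of the snippet above.
model = estimator.create_model(role='SageMakerRole')
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')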
def _create_sagemaker_model(self, instance_type, accelerator_type=None, tags=None):
    """Create a SageMaker Model entity for this model.

    Args:
        instance_type (str): The EC2 instance type that this Model will be
            used for; this is only used to determine if the image needs GPU
            support or not.
        accelerator_type (str): Type of Elastic Inference accelerator to
            attach to an endpoint for model loading and inference, for
            example, 'ml.eia1.medium'. If not specified, no Elastic
            Inference accelerator will be attached to the endpoint.
        tags (List[dict[str, str]]): Optional. The list of tags to add to
            the model. Example::

                tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]

            For more information about tags, see
            https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
    """
    container_def = self.prepare_container_def(instance_type, accelerator_type=accelerator_type)
    self.name = self.name or utils.name_from_image(container_def["Image"])
    enable_network_isolation = self.enable_network_isolation()

    self.sagemaker_session.create_model(
        self.name,
        self.role,
        container_def,
        vpc_config=self.vpc_config,
        enable_network_isolation=enable_network_isolation,
        tags=tags,
    )