def _launch_predict_remote(self, checkpoint_path):
    self.service = self.global_project.services.get(service_name='predict')
    model_specs = self.optimal_model.unwrap()
    dataset_input = dl.FunctionIO(type='Dataset', name='dataset', value={"dataset_id": self.dataset_id})
    checkpoint_path_input = dl.FunctionIO(type='Json', name='checkpoint_path',
                                          value={"checkpoint_path": checkpoint_path})
    val_query_input = dl.FunctionIO(type='Json', name='val_query', value=self.val_query)
    model_specs_input = dl.FunctionIO(type='Json', name='model_specs', value=model_specs)
    inputs = [dataset_input, val_query_input, checkpoint_path_input, model_specs_input]
    logger.info('checkpoint is of type: ' + str(type(checkpoint_path)))
    try:
        logger.info('trying to get execution object')
        execution_obj = self._run_pred_remote_execution(inputs)
        logger.info('got execution object')
        # TODO: turn execution_obj into metrics
        # Poll until the remote execution reaches a terminal state.
        while execution_obj.latest_status['status'] != 'success':
            time.sleep(5)
            execution_obj = dl.executions.get(execution_id=execution_obj.id)
            if execution_obj.latest_status['status'] == 'failed':
                raise Exception('plugin execution failed')
        logger.info('execution object status is successful')
        # download artifacts, should contain dir with txt file annotations
    except Exception:
        logger.exception('remote predict execution failed')
        raise
def push_package(project):
    dataset_input = dl.FunctionIO(type='Dataset', name='dataset')
    train_query_input = dl.FunctionIO(type='Json', name='train_query')
    val_query_input = dl.FunctionIO(type='Json', name='val_query')
    hp_value_input = dl.FunctionIO(type='Json', name='hp_values')
    model_specs_input = dl.FunctionIO(type='Json', name='model_specs')
    checkpoint_path_input = dl.FunctionIO(type='Json', name='checkpoint_path')
    package_name_input = dl.FunctionIO(type='Json', name='package_name')
    configs_input = dl.FunctionIO(type='Json', name='configs')
    time_input = dl.FunctionIO(type='Json', name='time')
    test_dataset_input = dl.FunctionIO(type='Json', name='test_dataset_id')
    query_input = dl.FunctionIO(type='Json', name='query')
    item_input = dl.FunctionIO(type='Item', name='item')
    model_input = dl.FunctionIO(type='Json', name='model_id')
    checkpoint_input = dl.FunctionIO(type='Json', name='checkpoint_id')

    predict_inputs = [dataset_input, val_query_input, checkpoint_path_input, model_specs_input]
    model_inputs = [dataset_input, train_query_input, val_query_input, hp_value_input, model_specs_input]
    zazu_inputs = [configs_input]

    predict_function = dl.PackageFunction(name='run', inputs=predict_inputs, outputs=[], description='')
    model_function = dl.PackageFunction(name='run', inputs=model_inputs, outputs=[], description='')
    zazu_search_function = dl.PackageFunction(name='search', inputs=zazu_inputs, outputs=[], description='')
    zazu_predict_function = dl.PackageFunction(name='predict', inputs=zazu_inputs, outputs=[], description='')
    timer_update_function = dl.PackageFunction(name='update_time', inputs=[time_input], outputs=[], description='')
    predict_item_function = dl.PackageFunction(name='predict_single_item', inputs=[item_input], outputs=[],
                                               description='')
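    # push_package is truncated above; a hedged sketch of how the rest of the body
    # might bundle these functions into a module and push the package (the entry
    # point, module name, and package name below are illustrative assumptions,
    # not the repo's confirmed layout):
    module = dl.PackageModule(entry_point='dataloop_services/zazu_module.py',
                              name='zazu_module',
                              functions=[predict_function, model_function,
                                         zazu_search_function, zazu_predict_function,
                                         timer_update_function, predict_item_function])
    return project.packages.push(package_name='zazuml',
                                 src_path=os.getcwd(),
                                 modules=[module])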
def push_and_deploy_package(self, project, package_name):
    # logger.info takes %-style formatting args, not multiple positional strings
    logger.info('dtlpy version: %s', dl.__version__)
    dataset_input = dl.FunctionIO(type='Dataset', name='dataset')
    hp_value_input = dl.FunctionIO(type='Json', name='hp_values')
    model_specs_input = dl.FunctionIO(type='Json', name='model_specs')
    init_specs_input = dl.FunctionIO(type='Json', name='package_name')
    input_to_init = {
        'package_name': package_name
    }
    inputs = [dataset_input, hp_value_input, model_specs_input]
    function = dl.PackageFunction(name='run', inputs=inputs, outputs=[], description='')
    module = dl.PackageModule(entry_point='dataloop_services/service_executor.py', name='service_executor',
                              functions=[function],
                              init_inputs=init_specs_input)
    package = project.packages.push(
        package_name=package_name,
        src_path=os.getcwd(),
        modules=[module])
    logger.info('deploying package . . .')
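    # The snippet ends just before the deploy call; a hedged sketch of how the
    # pushed package might be deployed with its init inputs (parameter values
    # here are assumptions, not the repo's confirmed configuration):
    service = package.services.deploy(service_name=package_name,
                                      module_name='service_executor',
                                      init_input=input_to_init)
    return service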
def _launch_remote_best_trial(self, best_trial):
    model_specs = self.optimal_model.unwrap()
    dataset_input = dl.FunctionIO(type='Dataset', name='dataset', value={"dataset_id": self.dataset_id})
    train_query_input = dl.FunctionIO(type='Json', name='train_query', value=self.train_query)
    val_query_input = dl.FunctionIO(type='Json', name='val_query', value=self.val_query)
    hp_value_input = dl.FunctionIO(type='Json', name='hp_values', value=best_trial['hp_values'])
    model_specs_input = dl.FunctionIO(type='Json', name='model_specs', value=model_specs)
    inputs = [dataset_input, train_query_input, val_query_input, hp_value_input, model_specs_input]
    execution_obj = self._run_trial_remote_execution(inputs)
    while execution_obj.latest_status['status'] != 'success':
        time.sleep(5)
        execution_obj = dl.executions.get(execution_id=execution_obj.id)
        if execution_obj.latest_status['status'] == 'failed':
            raise Exception('package execution failed')
    return execution_obj
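# The same poll-until-terminal loop appears in both _launch_predict_remote and
# _launch_remote_best_trial; a hedged refactoring sketch (this helper is
# hypothetical, not in the repo) that both methods could share:
def _wait_for_execution(execution_obj, poll_seconds=5):
    while execution_obj.latest_status['status'] != 'success':
        time.sleep(poll_seconds)
        execution_obj = dl.executions.get(execution_id=execution_obj.id)
        if execution_obj.latest_status['status'] == 'failed':
            raise Exception('remote execution failed')
    return execution_obj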
trial_service.delete()
zazu_service.delete()

if args.zazu_timer:
    logger.info('about to launch timer deployment')
    with open('global_configs.json', 'r') as fp:
        global_project_name = json.load(fp)['project']
    global_project = dl.projects.get(project_name=global_project_name)
    global_package_obj = push_package(global_project)
    with open('configs.json', 'r') as fp:
        configs = json.load(fp)
    configs_input = dl.FunctionIO(type='Json', name='configs', value=json.dumps(configs))
    time_input = dl.FunctionIO(type='Json', name='time', value=3600 * 0.25)
    test_dataset_input = dl.FunctionIO(type='Json', name='test_dataset_id', value='5eb7e0bdd4eb9434c77d80b5')
    query_input = dl.FunctionIO(type='Json', name='query',
                                value=json.dumps({"resource": "items", "sort": {}, "page": 0, "pageSize": 1000,
                                                  "filter": {"$and": [{"dir": "/items/val*"}, {"hidden": False},
                                                                      {"type": "file"}]}}))
    init_inputs = [configs_input, time_input, test_dataset_input, query_input]
    deploy_zazu_timer(package=global_package_obj,
                      init_inputs=init_inputs)
    logger.info('timer deployment launched successfully')

if args.update:
    with open('global_configs.json', 'r') as fp:
        # read the project name field, not the whole config dict
        global_project_name = json.load(fp)['project']
    maybe_login()
    global_project = dl.projects.get(project_name=global_project_name)
    update_service(global_project, 'trial')
    update_service(global_project, 'zazu')
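# maybe_login() is called above but not shown in the snippet; a plausible sketch
# (an assumption, not the repo's confirmed code): reuse the cached token and only
# prompt for a fresh login when it has expired.
def maybe_login():
    try:
        if dl.token_expired():
            dl.login()
    except Exception:
        dl.login()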