for i in range(max_instance)])
# parse and create dljoin id column
label_col_info = conn.columninfo(output).ColumnInfo
filename_col_length = label_col_info.loc[label_col_info['Column'] == 'idjoin', ['FormattedLength']].values[0][0]
image_sas_code = "length idjoin $ {0}; fn=scan(_path_,{1},'/'); idjoin = inputc(substr(fn, 1, length(fn)-4),'{0}.');".format(filename_col_length,
len(data_path.split('\\')) - 2)
img_tbl = conn.CASTable(det_img_table, computedvars=['idjoin'], computedvarsprogram=image_sas_code, vars=[{'name': '_image_'}])
# join the image table and label table together
res = conn.deepLearn.dljoin(table=img_tbl, annotation=output, id='idjoin',
casout={'name': output, 'replace': True, 'replication': 0})
if res.severity > 0:
raise DLPyError('ERROR: Failed to create the object detection table.')
with sw.option_context(print_messages=False):
for name in input_tbl_name:
conn.table.droptable(name)
for var in var_name:
conn.table.droptable('output{}'.format(var))
conn.table.droptable(det_img_table)
print("NOTE: Object detection table is successfully created.")
return var_order[2:]
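# Usage sketch (editorial addition, not part of the library source), assuming the fragment above
# comes from dlpy.utils.create_object_detection_table. The host, port, paths, and output table
# name below are placeholder assumptions; substitute values from your own CAS deployment.
import swat
from dlpy.utils import create_object_detection_table

conn = swat.CAS('cas-host.example.com', 5570)        # hypothetical CAS connection
label_vars = create_object_detection_table(
    conn,
    data_path='/data/detection/images',              # server-side folder holding images and txt annotations
    coord_type='yolo',                               # or 'coco'
    output='trainSet')                               # CAS table that receives the joined image/label data
# label_vars is the function's return value: the list of label variables for the detection task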
weightFilePath=file_name,
caslib=cas_lib_name)
else:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
textEmbeddingDim=embedding_dim,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name)
else:
if has_gpu_model:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights', model=self.model_table,
modelWeights=dict(replace=True,
name=self.model_name + '_weights'),
formatType=format_type, weightFilePath=file_name,
gpuModel=use_gpu,
caslib=cas_lib_name)
else:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights', model=self.model_table,
modelWeights=dict(replace=True,
name=self.model_name + '_weights'),
formatType=format_type, weightFilePath=file_name,
caslib=cas_lib_name)
# handle error or create necessary attributes
if rt.severity > 1:
caslib=cas_lib_name,
labelTable=label_table)
else:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
textEmbeddingDim=embedding_dim,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name,
labelTable=label_table)
else:
if has_gpu_model:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights', model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
formatType=format_type, weightFilePath=file_name, caslib=cas_lib_name,
gpuModel=use_gpu,
labelTable=label_table)
else:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights', model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name,
labelTable=label_table)
# handle error or create necessary attributes
if rt.severity > 1:
else:
sep = '\\'
# label variable name suffixes; '_' holds the object category
yolo_var_name = ['_', '_x', '_y', '_width', '_height']
coco_var_name = ['_', '_xmin', '_ymin', '_xmax', '_ymax']
if coord_type.lower() == 'yolo':
var_name = yolo_var_name
elif coord_type.lower() == 'coco':
var_name = coco_var_name
image_size = _pair(image_size) # ensure image_size is a pair
det_img_table = random_name('DET_IMG')
# loading _image_ and processing to required image size
with caslibify_context(conn, data_path, 'load') as (caslib, path_after_caslib), \
sw.option_context(print_messages=False):
if caslib is None and path_after_caslib is None:
print('Cannot create a caslib for the provided path. Please make sure that the path is accessible from '
      'the CAS server. Please also check if there is a subpath that is part of an existing caslib.')
res = conn.image.loadImages(path=path_after_caslib,
recurse=False,
labelLevels=-1,
caslib=caslib,
casout={'name': det_img_table, 'replace':True})
if res.severity > 0:
for msg in res.messages:
if not msg.startswith('WARNING'):
print(msg)
res = conn.image.processImages(table={'name': det_img_table},
imagefunctions=[
if has_data_spec:
# run action with dataSpec option
if has_gpu_model and (not has_embedding_dim):
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
gpuModel=use_gpu,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name,
labelTable=label_table)
elif (not has_gpu_model) and (not has_embedding_dim):
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name,
labelTable=label_table)
elif has_gpu_model and has_embedding_dim:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
gpuModel=use_gpu,
textEmbeddingDim=embedding_dim,
def get_cas_host_type(conn):
''' Return a server type indicator '''
with sw.option_context(print_messages=False):
out = conn.about()
ostype = out['About']['System']['OS Family']
stype = 'mpp'
htype = 'nohdfs'
if out['server'].loc[0, 'nodes'] == 1:
stype = 'smp'
if ostype.startswith('LIN') or ostype.startswith('LX'):
ostype = 'linux'
elif ostype.startswith('WIN'):
ostype = 'windows'
elif ostype.startswith('OSX'):
ostype = 'osx'
else:
raise ValueError('Unknown OS type: ' + ostype)
# Check to see if HDFS is present
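# Usage sketch (editorial addition): how the value returned by get_cas_host_type is typically
# consumed in this module. The string is assumed to start with the server OS family (e.g.
# 'linux...', 'windows...'), which is all the caller relies on; 'conn' is an existing swat.CAS session.
server_type = get_cas_host_type(conn).lower()
sep = '/' if server_type.startswith('lin') or server_type.startswith('osx') else '\\'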
Default: (416, 416)
task : str, optional
Specifies the task of the table.
Valid Values: object detection, instance segmentation
Default: object detection
Returns
-------
A list of the label variables for the specified task
'''
# check parameters
if coord_type.lower() not in ['yolo', 'coco']:
raise ValueError('coord_type, {}, is not supported'.format(coord_type))
with sw.option_context(print_messages=False):
server_type = get_cas_host_type(conn).lower()
local_os_type = platform.system()
unix_type = server_type.startswith("lin") or server_type.startswith("osx")
# check whether the client and the CAS server run on different OS families;
# if they do and no local_path is given, the txt files already in data_path serve as the annotations
need_to_parse = True
if unix_type == local_os_type.startswith('Win'):  # True when the server and client OS families differ
if local_path is None:
print('The txt files in data_path are used as annotation files.')
need_to_parse = False
else:
local_path = data_path
conn.retrieve('loadactionset', _messagelevel='error', actionset='image')
conn.retrieve('loadactionset', _messagelevel='error', actionset='deepLearn')
conn.retrieve('loadactionset', _messagelevel='error', actionset='transpose')
'support this parameter.')
if has_data_spec:
# run action with dataSpec option
if has_gpu_model and (not has_embedding_dim):
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
gpuModel=use_gpu,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name)
elif (not has_gpu_model) and (not has_embedding_dim):
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name)
elif has_gpu_model and has_embedding_dim:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
gpuModel=use_gpu,
textEmbeddingDim=embedding_dim,
formatType=format_type,
textEmbeddingDim=embedding_dim,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name)
else:
if has_gpu_model:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights', model=self.model_table,
modelWeights=dict(replace=True,
name=self.model_name + '_weights'),
formatType=format_type, weightFilePath=file_name,
gpuModel=use_gpu,
caslib=cas_lib_name)
else:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights', model=self.model_table,
modelWeights=dict(replace=True,
name=self.model_name + '_weights'),
formatType=format_type, weightFilePath=file_name,
caslib=cas_lib_name)
# handle error or create necessary attributes
if rt.severity > 1:
for msg in rt.messages:
print(msg)
raise DLPyError('Cannot import model weights; there seems to be a problem.')
# create attributes if necessary
if not has_data_spec:
from dlpy.attribute_utils import create_extended_attributes
create_extended_attributes(self.conn, self.model_name, self.layers, data_spec)
caslib=cas_lib_name,
labelTable=label_table)
elif has_gpu_model and has_embedding_dim:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
gpuModel=use_gpu,
textEmbeddingDim=embedding_dim,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name,
labelTable=label_table)
else:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights',
model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
dataSpecs=data_spec,
textEmbeddingDim=embedding_dim,
formatType=format_type,
weightFilePath=file_name,
caslib=cas_lib_name,
labelTable=label_table)
else:
if has_gpu_model:
with sw.option_context(print_messages=False):
rt = self._retrieve_('deeplearn.dlimportmodelweights', model=self.model_table,
modelWeights=dict(replace=True, name=self.model_name + '_weights'),
formatType=format_type, weightFilePath=file_name, caslib=cas_lib_name,
gpuModel=use_gpu,