Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Print a summary of the selected experiment configuration.
# NOTE(review): script fragment -- 'ck', 'q', 'table' and 'real_keys'
# are defined earlier in the file (not visible in this chunk).
ck.out('')
for label, idx in (('Dataset UOA', 1),
                   ('Dataset file', 2),
                   ('Target OS', 4),
                   ('OpenCL platform', 5),
                   ('OpenCL device', 6),
                   ('Compiler', 8)):
    ck.out(label + ': ' + str(q[idx]))

# Export the collected experiment table to CSV via the CK 'experiment' module.
r = ck.access({'action': 'convert_table_to_csv',
               'module_uoa': 'experiment',
               'table': table,
               'keys': real_keys,
               'file_name': 'start_analysis_tmp.csv'})
if r['return'] > 0:
    ck.err(r)

# Finish.
ck.out('')
ck.out('Thank you for using CK!')
exit(0)
# --- Tail of an 'autotune' pipeline request: recording options and the
# --- pipeline itself. NOTE(review): this chunk starts mid-dict-literal;
# --- the head of the 'ii' dict is outside the visible source.
'repetitions':num_repetitions,
'record':'yes',
'record_failed':'yes',
'record_params':{
'search_point_by_features':'yes'
},
'record_repo':record_repo,
'record_uoa':record_uoa,
'tags':[ 'explore-batch-size-libs-models', model_tags, lib_tags, platform_tags ],
'pipeline':cpipeline,
'out':'con'}
# Run the autotuning pipeline; propagate any framework-level error.
r=ck.access(ii)
if r['return']>0: return r
# A pipeline can also "fail" without a framework error -- surface that
# explicitly with its reason.
fail=r.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
return {'return':0}
# --- Tail of a pipeline-setup request: remaining env vars plus common
# --- benchmarking options. NOTE(review): chunk starts mid-dict-literal;
# --- the head of 'ii' is outside the visible source.
'CK_RESULTS_DIR':'predictions',
'CK_SKIP_IMAGES':0
},
# Pin CPU/GPU frequencies to 'max' for reproducible measurements.
'cpu_freq':'max',
'gpu_freq':'max',
'flags':'-O3',
'speed':'no',
'energy':'no',
'skip_print_timers':'yes',
'out':'con'
}
r=ck.access(ii)
if r['return']>0: return r
# Distinguish framework errors (above) from pipeline-level failure.
fail=r.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
# The pipeline must report readiness before it can be benchmarked.
ready=r.get('ready','')
if ready!='yes':
return {'return':11, 'error':'pipeline not ready'}
state=r['state']
tmp_dir=state['tmp_dir']
# Remember resolved deps for this benchmarking session.
xcdeps=r.get('dependencies',{})
# Clean pipeline.
if 'ready' in r: del(r['ready'])
# Deep-copy the baseline pipeline so per-point tweaks don't leak into
# later benchmarking sessions.
cpipeline=copy.deepcopy(pipeline)
# Reset deps and change UOA.
new_deps={'lib-tensorrt':copy.deepcopy(depl),
'caffemodel':copy.deepcopy(depm)}
new_deps['lib-tensorrt']['uoa']=lib_uoa
new_deps['caffemodel']['uoa']=model_uoa
# Resolve the new library/model deps for the target host/OS/device.
jj={'action':'resolve',
'module_uoa':'env',
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'deps':new_deps}
r=ck.access(jj)
if r['return']>0: return r
cpipeline['dependencies'].update(new_deps)
pipeline_name = '%s.json' % record_uoa
# Autotune over FP16 enablement and Caffe batch size.
ii={'action':'autotune',
'module_uoa':'pipeline',
'data_uoa':'program',
'choices_order':[
[
'##choices#env#CK_TENSORRT_ENABLE_FP16'
],
[
'##choices#env#CK_CAFFE_BATCH_SIZE'
# NOTE(review): the source appears truncated here -- the closing
# brackets of 'choices_order' (and any intervening keys) are missing
# before the recording options below; confirm against the full file.
'repetitions':num_repetitions,
'record':'yes',
'record_failed':'yes',
'record_params':{
'search_point_by_features':'yes'
},
'record_repo':record_repo,
'record_uoa':record_uoa,
'tags':[ 'explore-batch-size-libs-models', model_tags, lib_tags, platform_tags ],
'pipeline':cpipeline,
'out':'con'}
# Run the autotuner; propagate framework errors, then surface
# pipeline-level failure distinctly.
r=ck.access(ii)
if r['return']>0: return r
fail=r.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
return {'return':0}
if (arg.accuracy):
batch_count = len([f for f in os.listdir(img_dir_val)
if f.endswith('.JPEG') and os.path.isfile(os.path.join(img_dir_val, f))])
else:
batch_count = 1
ii={'action':'show',
'module_uoa':'env',
'tags':'dataset,imagenet,aux'}
rx=ck.access(ii)
if len(rx['lst']) == 0: return rx
img_dir_aux = rx['lst'][0]['meta']['env']['CK_ENV_DATASET_IMAGENET_AUX']
ii={'action':'load',
'module_uoa':'program',
'data_uoa':program}
rx=ck.access(ii)
if rx['return']>0: return rx
mm=rx['dict']
# Get compile-time and run-time deps.
cdeps=mm.get('compile_deps',{})
rdeps=mm.get('run_deps',{})
# Merge rdeps with cdeps for setting up the pipeline (which uses
# common deps), but tag them as "for_run_time".
for k in rdeps:
cdeps[k]=rdeps[k]
cdeps[k]['for_run_time']='yes'
print cdeps
depl=copy.deepcopy(cdeps['lib-tensorflow'])
if (arg.tos is not None) and (arg.did is not None):
tos=arg.tos
tdid=arg.did
# --- Tail of a pipeline-setup request: image-dir / batch-count env vars
# --- plus common benchmarking options. NOTE(review): chunk starts
# --- mid-dict-literal; the head of 'ii' is outside the visible source.
'CK_ENV_DATASET_IMAGE_DIR':img_dir,
'CK_BATCH_COUNT':num_batches
},
# Pin CPU/GPU frequencies to 'max' for reproducible measurements.
'cpu_freq':'max',
'gpu_freq':'max',
'flags':'-O3',
'speed':'no',
'energy':'no',
'skip_print_timers':'yes',
'out':'con'
}
r=ck.access(ii)
if r['return']>0: return r
# Distinguish framework errors (above) from pipeline-level failure.
fail=r.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
# The pipeline must report readiness before it can be benchmarked.
ready=r.get('ready','')
if ready!='yes':
return {'return':11, 'error':'pipeline not ready'}
state=r['state']
tmp_dir=state['tmp_dir']
# Remember resolved deps for this benchmarking session.
xcdeps=r.get('dependencies',{})
# Clean pipeline.
if 'ready' in r: del(r['ready'])
# Host and target OS params.
hos=r['host_os_uoa']
hosd=r['host_os_dict']
tos=r['os_uoa']
tosd=r['os_dict']
tdid=r['device_id']
# program='mobilenets-armcl-opencl'
program='image-classification-tf-py'
ii={'action':'show',
'module_uoa':'env',
'tags':'dataset,imagenet,raw,val'}
rx=ck.access(ii)
if len(rx['lst']) == 0: return rx
# FIXME: It's probably better to use CK_ENV_DATASET_IMAGE_DIR.
img_dir_val = rx['lst'][0]['meta']['env']['CK_CAFFE_IMAGENET_VAL']
if (arg.accuracy):
batch_count = len([f for f in os.listdir(img_dir_val)
if f.endswith('.JPEG') and os.path.isfile(os.path.join(img_dir_val, f))])
else:
batch_count = 1
ii={'action':'show',
'module_uoa':'env',
'tags':'dataset,imagenet,aux'}
rx=ck.access(ii)
if len(rx['lst']) == 0: return rx
img_dir_aux = rx['lst'][0]['meta']['env']['CK_ENV_DATASET_IMAGENET_AUX']
# --- Tail of a pipeline-setup request: common benchmarking options.
# --- NOTE(review): chunk starts mid-dict-literal; the head of 'ii' is
# --- outside the visible source.
# Pin CPU/GPU frequencies to 'max' for reproducible measurements.
'cpu_freq':'max',
'gpu_freq':'max',
'flags':'-O3',
'speed':'no',
'energy':'no',
'no_state_check':'yes',
'skip_calibration':'yes',
'skip_print_timers':'yes',
'out':'con',
}
r=ck.access(ii)
if r['return']>0: return r
# Distinguish framework errors (above) from pipeline-level failure.
fail=r.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+r.get('fail_reason','')+')'}
# The pipeline must report readiness before it can be benchmarked.
ready=r.get('ready','')
if ready!='yes':
return {'return':11, 'error':'pipeline not ready'}
state=r['state']
tmp_dir=state['tmp_dir']
# Remember resolved deps for this benchmarking session.
xcdeps=r.get('dependencies',{})