# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def check_message(parameters):
    """Decide whether this dataset should be processed.

    Returns False if this extractor's name already appears in the dataset's
    JSON-LD metadata agents (i.e. the dataset was already processed), and
    otherwise returns True only when the file list contains both a
    ``*_left.bin`` and a ``*_right.bin`` file.

    NOTE(review): this definition is shadowed by a second ``check_message``
    defined later in this module — confirm which one is meant to be active.
    """
    # TODO: re-enable once this is merged into Clowder: https://opensource.ncsa.illinois.edu/bitbucket/projects/CATS/repos/clowder/pull-requests/883/overview
    # Fetch metadata from dataset to check if we should skip (or eventually
    # remove) an existing entry for this extractor first.
    md = extractors.download_dataset_metadata_jsonld(parameters['host'], parameters['secretKey'],
                                                     parameters['datasetId'], extractorName)
    for m in md:
        if 'agent' in m and 'name' in m['agent']:
            if m['agent']['name'].find(extractorName) > -1:
                print("skipping, already done")
                return False
                # extractors.remove_dataset_metadata_jsonld(parameters['host'], parameters['secretKey'], parameters['datasetId'], extractorName)

    # Check for a left and right file before beginning processing.
    found_left = False
    found_right = False
    for f in parameters['filelist']:
        if 'filename' in f and f['filename'].endswith('_left.bin'):
            found_left = True
        elif 'filename' in f and f['filename'].endswith('_right.bin'):
            found_right = True

    # BUG FIX: the original fell off the end here (implicitly returning None),
    # so the found_left/found_right flags were computed but never used.
    # Report whether both halves of the stereo pair are present.
    return found_left and found_right
def check_message(parameters):
    """Decide whether this dataset should be processed.

    Returns False if the dataset already carries metadata from this extractor,
    True when at least 10 files match the VIS/NIR side-view/top-view naming
    pattern, and False otherwise.

    NOTE(review): this definition shadows an earlier ``check_message`` in the
    same module — confirm which one is meant to be active.
    """
    # For now if the dataset already has metadata from this extractor, don't recreate.
    md = extractors.download_dataset_metadata_jsonld(parameters['host'], parameters['secretKey'],
                                                     parameters['datasetId'], extractorName)
    for m in md:
        if 'agent' in m and 'name' in m['agent']:
            if m['agent']['name'].find(extractorName) > -1:
                print("skipping dataset %s, already processed" % parameters['datasetId'])
                return False

    # Expect at least 10 relevant files to execute this processing.
    relevantFiles = 0
    for f in parameters['filelist']:
        # e.g. "VIS_SV_0", "nir_tv" — camera type + view angle (+ optional frame index)
        raw_name = re.findall(r"(VIS|NIR|vis|nir)_(SV|TV|sv|tv)(_\d+)*" , f["filename"])
        if raw_name != []:
            relevantFiles += 1

    # BUG FIX: the original implicitly returned None when fewer than 10
    # relevant files were found; return False explicitly (both are falsy,
    # so truthiness-based callers are unaffected).
    return relevantFiles >= 10
def process_dataset(parameters):
    # Process one dataset: skip it if this extractor's metadata already exists,
    # otherwise begin assembling the compiled traits table.
    # NOTE(review): this function continues past the visible chunk — only the
    # skip-check and the start of the traits table are documented here.
    # TODO: re-enable once this is merged into Clowder: https://opensource.ncsa.illinois.edu/bitbucket/projects/CATS/repos/clowder/pull-requests/883/overview
    # fetch metadata from dataset to check if we should remove existing entry for this extractor first
    md = extractors.download_dataset_metadata_jsonld(parameters['host'], parameters['secretKey'], parameters['datasetId'], extractorName)
    if len(md) > 0:
        for m in md:
            # A metadata record whose agent name mentions this extractor means
            # the dataset was already processed — bail out without reprocessing.
            if 'agent' in m and 'name' in m['agent']:
                if m['agent']['name'].find(extractorName) > -1:
                    print("skipping, already done")
                    return
                    # extractors.remove_dataset_metadata_jsonld(parameters['host'], parameters['secretKey'], parameters['datasetId'], extractorName)
                    # pass
    # Compiled traits table: column order for the output record, then a dict
    # of per-plant trait values initialized to empty defaults.
    fields = ('plant_barcode', 'genotype', 'treatment', 'imagedate', 'sv_area', 'tv_area', 'hull_area',
              'solidity', 'height', 'perimeter')
    traits = {'plant_barcode' : '',
              'genotype' : '',
              'treatment' : '',