How to use the pyclowder.extractors.upload_file_to_dataset function in pyclowder

To help you get started, we've selected a few pyclowder examples based on popular ways upload_file_to_dataset is used in public projects.


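Every example below comes from the TERRA-REF computing pipeline and calls the helper through the pyclowder extractors module: upload_file_to_dataset(filepath, parameters) takes the path of a locally written output file plus the parameters dict the extractor received from Clowder (the same dict that supplies parameters["datasetId"] and parameters['host'] in the metadata snippets). Here is a minimal sketch of that pattern; the callback name, output file name, and CSV content are made up for illustration:

# Minimal sketch of the upload pattern shared by the examples below.
# The callback name, output file name, and CSV content are illustrative,
# not taken from any real extractor.
import os
from pyclowder import extractors

def process_dataset(parameters):
    # `parameters` is the message dict the extractor received from Clowder;
    # it is passed through unchanged to the upload call.
    outfile = 'results.csv'
    with open(outfile, 'w') as out:
        out.write('trait,value\n')
        out.write('height,42.0\n')

    # Attach the local file to the dataset that triggered this extractor.
    extractors.upload_file_to_dataset(outfile, parameters)

    # The file now lives in Clowder, so the local copy can be removed.
    os.remove(outfile)

Each full example repeats this skeleton: produce one or more output files, call upload_file_to_dataset for each, and usually finish by posting a small JSON-LD record that marks the dataset as COMPLETED.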
github terraref/computing-pipeline/scripts/plantcv/extractor_with_avg_traits/terra.plantcv.py (view on GitHub)
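# Snippet (begins mid-statement): averaged PlantCV trait values are collected
# into a list, written with their field names to a two-row CSV, uploaded to the
# dataset, and echoed into a COMPLETED dataset metadata record.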
traits['treatment'], 
                    traits['imagedate'],
                    np.mean(traits['sv_area']), 
                    traits['tv_area'], 
                    np.mean(traits['hull_area']),
                    np.mean(traits['solidity']), 
                    np.mean(traits['height']),
                    np.mean(traits['perimeter'])]


    outfile = 'avg_traits.csv'
    with open(outfile, 'w') as csv:
        csv.write(','.join(map(str, fields)) + '\n')
        csv.write(','.join(map(str, trait_list)) + '\n')
        csv.flush()
        extractors.upload_file_to_dataset(outfile, parameters)
    os.remove(outfile)

    # debug
    csv_data = ','.join(map(str, fields)) + '\n' + ','.join(map(str, trait_list)) + '\n'
    print(csv_data)

    metadata = {
        "@context": {
            "@vocab": "https://clowder.ncsa.illinois.edu/clowder/assets/docs/api/index.html#!/files/uploadToDataset"
        },
        "dataset_id": parameters["datasetId"],
        "content": {"status": "COMPLETED", "csv": csv_data},
        "agent": {
            "@type": "cat:extractor",
            "extractor_id": parameters['host'] + "/api/extractors/" + extractorName
        }
    }
    extractors.upload_dataset_metadata_jsonld(mdata=metadata, parameters=parameters)
github terraref/computing-pipeline/scripts/hyperspectral/extractor/terra.hyperspectral.py (view on GitHub)
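	# Snippet: run the terraref.sh worker script, and if the expected output file
	# was produced, upload it to the dataset before cleaning up local copies.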
	# Invoke terraref.sh
	outFilePath = os.path.join(outputDirectory, get_output_filename(files['_raw']['filename']))
	print('invoking terraref.sh to create: %s' % outFilePath)
	returncode = subprocess.call(["bash", workerScript, "-d", "1", "-I", inputDirectory, "-O", outputDirectory])
	print('done creating output file (%s)' % returncode)

	if returncode != 0:
		print('terraref.sh encountered an error')

	# Verify outfile exists and upload to clowder
	if os.path.exists(outFilePath):
		print('output file detected')
		if returncode == 0:
			print('uploading output file...')
			extractors.upload_file_to_dataset(filepath=outFilePath, parameters=parameters)
			print('done uploading')
		# Clean up the output file.
		os.remove(outFilePath)
	else:
		print('no output file was produced')

	print('cleaning up...')
	# Clean up the input files.
	for fileExt in files:
		os.remove(files[fileExt]['path'])
	print('done cleaning')
github terraref/computing-pipeline/scripts/stereoImager/extractor/terra.demosaic.py (view on GitHub)
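    # Snippet: compute left/right camera positions and GPS bounds, demosaic the
    # raw stereo images to JPGs, convert them to geoTIFFs, upload all four output
    # files to the dataset, and finish by posting COMPLETED dataset metadata.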
    center_position = bin2tiff.get_position(metadata) # (x, y, z) in meters
    fov = bin2tiff.get_fov(metadata, center_position[2], left_shape) # (fov_x, fov_y) in meters; need to pass in the camera height to get correct fov
    left_position = [center_position[0]+bin2tiff.STEREO_OFFSET, center_position[1], center_position[2]]
    right_position = [center_position[0]-bin2tiff.STEREO_OFFSET, center_position[1], center_position[2]]
    left_gps_bounds = bin2tiff.get_bounding_box(left_position, fov) # (lat_max, lat_min, lng_max, lng_min) in decimal degrees
    right_gps_bounds = bin2tiff.get_bounding_box(right_position, fov)

    print("Creating demosaicked images")
    left_out = os.path.join(temp_out_dir, img_left[:-4] + '.jpg')
    left_image = bin2tiff.process_image(left_shape, img_left, left_out)
    right_out = os.path.join(temp_out_dir, img_right[:-4] + '.jpg')
    right_image = bin2tiff.process_image(right_shape, img_right, right_out)
    print("Uploading output JPGs to dataset")
    extractors.upload_file_to_dataset(left_out, parameters)
    extractors.upload_file_to_dataset(right_out, parameters)

    print("Creating geoTIFF images")
    left_tiff_out = os.path.join(temp_out_dir, img_left[:-4] + '.tif')
    bin2tiff.create_geotiff('left', left_image, left_gps_bounds, left_tiff_out)
    right_tiff_out = os.path.join(temp_out_dir, img_right[:-4] + '.tif')
    bin2tiff.create_geotiff('right', right_image, right_gps_bounds, right_tiff_out)
    print("Uploading output geoTIFFs to dataset")
    extractors.upload_file_to_dataset(left_tiff_out, parameters)
    extractors.upload_file_to_dataset(right_tiff_out, parameters)

    # Tell Clowder this is completed so subsequent file updates don't daisy-chain
    metadata = {
        "@context": {
            "@vocab": "https://clowder.ncsa.illinois.edu/clowder/assets/docs/api/index.html#!/files/uploadToDataset"
        },
        "dataset_id": parameters["datasetId"],
github terraref/computing-pipeline/scripts/plantcv/extractor/terra.plantcv.py (view on GitHub)
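        # Snippet (begins mid-statement): finish posting JSON-LD metadata for the
        # NIR image, then build the average-traits CSV, upload it to the dataset,
        # and post a COMPLETED dataset metadata record.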
"content": vn_traits[1],
            "agent": {
                "@type": "cat:extractor",
                "extractor_id": parameters['host'] + "/api/extractors/" + extractorName
            }
        }
        parameters["fileid"] = nir_id
        extractors.upload_file_metadata_jsonld(mdata=metadata, parameters=parameters)

    # compose the summary traits
    trait_list = pcia.generate_traits_list(traits)

    # generate output CSV
    outfile = 'avg_traits.csv'
    pcia.generate_average_csv(outfile, fields, trait_list)
    extractors.upload_file_to_dataset(outfile, parameters)
    os.remove(outfile)
    metadata = {
        "@context": {
            "@vocab": "https://clowder.ncsa.illinois.edu/clowder/assets/docs/api/index.html#!/files/uploadToDataset"
        },
        "dataset_id": parameters["datasetId"],
        "content": {"status": "COMPLETED"},
        "agent": {
            "@type": "cat:extractor",
            "extractor_id": parameters['host'] + "/api/extractors/" + extractorName
        }
    }
    extractors.upload_dataset_metadata_jsonld(mdata=metadata, parameters=parameters)