How to use the pyclowder.files.upload_metadata function in pyclowder

To help you get started, we’ve selected a few pyclowder examples based on popular ways it is used in public projects.

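Before the project examples, here is a minimal sketch of the call itself. It assumes you are inside an extractor's process_message callback, so connector, host, secret_key and resource are supplied by pyclowder; the "content" payload and the agent fields are illustrative stand-ins modeled on the examples below:

import pyclowder.files

def process_message(self, connector, host, secret_key, resource, parameters):
    # JSON-LD envelope Clowder expects for file metadata; the agent block
    # records what attached the metadata
    metadata = {
        "@context": ["https://clowder.ncsa.illinois.edu/contexts/metadata.jsonld"],
        "file_id": resource['id'],
        "content": {"plots_processed": 42},  # placeholder payload
        "agent": {
            "@type": "cat:extractor",
            "extractor_id": "%sapi/extractors/%s" % (host, self.extractor_info['name'])
        }
    }
    # POST the metadata to the file identified by resource['id']
    pyclowder.files.upload_metadata(connector, host, secret_key, resource['id'], metadata)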

Example from terraref/computing-pipeline: scripts/rebuild_scripts/loadDanforthSnapshots.py
logger.debug("Uploading %s" % img_file)
                img_path = os.path.join(experiment_root, snap_dir, img_file)
                img_md = formatImageMetadata(img_file, experiment_md['metadata'], snap_details)
                file_md = {
                    "@context": ["https://clowder.ncsa.illinois.edu/contexts/metadata.jsonld"],
                    "content": img_md,
                    "agent": {
                        "@type": "cat:user",
                        "user_id": "%sapi/users/%s" % (clowder_host, clowder_uid)
                    }
                }
                if not dry_run:
                    file_id = upload_to_dataset(conn, clowder_host, clowder_user, clowder_pass, snap_dataset, img_path)
                    logger.debug("Created file %s [%s]" % (img_file, file_id))
                    file_md["file_id"] = file_id
                    upload_file_metadata(conn, clowder_host, clowder_key, file_id, file_md)
                    logger.debug("Uploaded metadata to [%s]" % file_id)
                else:
                    logger.debug("Skipping file %s [%s]" % (img_file, "DRY RUN"))

            # Submit new dataset for extraction to plantCV extractor
            if not dry_run:
                extractor = "terra.lemnatec.plantcv"
                logger.debug("Submitting dataset [%s] to %s" % (snap_dataset, extractor))
                submit_extraction(conn, clowder_host, clowder_key, snap_dataset, extractor)

    logger.debug("Experiment uploading complete.")

else:
    logger.debug("%s does not exist" % experiment_root)
    sys.exit(1)
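In this script the order matters: upload_to_dataset returns the Clowder id of the newly created file, which is embedded in the metadata document as file_id before upload_file_metadata posts it, and the dry_run flag guards every write so the directory walk can be tested without touching Clowder. Once all images in a snapshot are loaded, the dataset is queued for the plantCV extractor via submit_extraction.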

Example from terraref/computing-pipeline: extractors/geostreams/terra_geostreams.py
"source": row['source'],
                    "value": row['value']
                }
                trait = row['trait']

                create_datapoint_with_dependencies(connector, host, secret_key, trait,
                                                   (centroid_lonlat[1], centroid_lonlat[0]), time_fmt, time_fmt,
                                                   dpmetadata, timestamp)
                successful_plots += 1

        # Attach metadata to the original file indicating this extractor was run
        self.log_info(resource, "updating file metadata (%s)" % resource['id'])
        ext_meta = build_metadata(host, self.extractor_info, resource['id'], {
            "plots_processed": successful_plots,
        }, 'file')
        upload_metadata(connector, host, secret_key, resource['id'], ext_meta)

        self.end_message(resource)
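The build_metadata helper used here (likely from the TERRA-REF terrautils package rather than pyclowder itself) wraps a plain content dict in the same JSON-LD envelope the first example built by hand. A rough sketch of the document it produces, assuming the envelope fields shown above:

ext_meta = {
    "@context": ["https://clowder.ncsa.illinois.edu/contexts/metadata.jsonld"],
    "file_id": resource['id'],
    "content": {"plots_processed": successful_plots},
    "agent": {
        "@type": "cat:extractor",
        "extractor_id": "%sapi/extractors/%s" % (host, self.extractor_info['name'])
    }
}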

Example from terraref/computing-pipeline: extractors/betydb/terra_betydb.py
    def process_message(self, connector, host, secret_key, resource, parameters):
        self.start_message(resource)

        # submit CSV to BETY
        self.log_info(resource, "submitting CSV to bety")
        submit_traits(resource['local_paths'][0], betykey=self.bety_key)

        # Attach metadata to the original file indicating this extractor was run
        self.log_info(resource, "updating file metadata (%s)" % resource['id'])
        ext_meta = build_metadata(host, self.extractor_info, resource['id'], {
            "betydb_link": "https://terraref.ncsa.illinois.edu/bety/api/v1/variables?name=canopy_cover"
        }, 'file')
        upload_metadata(connector, host, secret_key, resource['id'], ext_meta)

        self.end_message(resource)
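Both extractor examples end the same way: build a small content dict describing what was done, wrap it in the JSON-LD envelope, and call upload_metadata against resource['id']. Note that start_message, end_message and log_info appear to come from the TERRA-REF extractor subclass rather than the base pyclowder Extractor. A minimal skeleton for a new extractor following this pattern, modeled on pyclowder's sample extractors (the class name and payload are illustrative, and an extractor_info.json is expected alongside the script):

import logging

from pyclowder.extractors import Extractor
import pyclowder.files

class MyMetadataExtractor(Extractor):
    def __init__(self):
        Extractor.__init__(self)
        # parse the standard pyclowder command-line arguments
        self.setup()
        logging.getLogger('pyclowder').setLevel(logging.DEBUG)

    def process_message(self, connector, host, secret_key, resource, parameters):
        # ... do the real work on resource['local_paths'][0] here ...

        # record what was done as file-level metadata, as in the examples above
        metadata = {
            "@context": ["https://clowder.ncsa.illinois.edu/contexts/metadata.jsonld"],
            "file_id": resource['id'],
            "content": {"status": "processed"},  # illustrative payload
            "agent": {
                "@type": "cat:extractor",
                "extractor_id": "%sapi/extractors/%s" % (host, self.extractor_info['name'])
            }
        }
        pyclowder.files.upload_metadata(connector, host, secret_key, resource['id'], metadata)

if __name__ == "__main__":
    MyMetadataExtractor().start()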