@ui.pass_index(app_name='su-ingest')
def cli(index, storage_units):
    # `index` is injected by the pass_index decorator; each storage-unit path
    # supplied on the command line is processed against that index.
    for storage_unit_path in storage_units:
        process_storage_unit(storage_unit_path, index=index)
@ui.pass_index(app_name=APP_NAME)
@task_app_options
@click.option('--num-tasks', type=int, help='Sample argument: number of tasks to generate')
@task_app(make_config=make_config, make_tasks=make_tasks)
def app_main(db_index, config, tasks, executor, **opts):
"""
make_config => config
config => make_tasks => tasks
"""
from pickle import dumps
unused(db_index, opts, config)
click.echo('Using executor {}'.format(repr(executor)))
task_runner = wrap_task(run_task, config['op'])
    click.echo('Task function size: {}'.format(len(dumps(task_runner))))
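The docstring above describes the task_app flow: make_config produces the runtime config, and that config drives make_tasks, which yields the individual tasks. Below is a minimal sketch of such a pair; the 'op' and 'tile_index' fields are illustrative assumptions, not part of the datacube task_app API.

# Illustrative only: hypothetical make_config/make_tasks matching the flow in
# the docstring above; the field names are assumptions, not datacube's API.
def make_config(index, config, **query):
    # fill in any defaults the tasks will rely on
    config.setdefault('op', 'mean')
    return config

def make_tasks(index, config, **kwargs):
    # yield one self-contained task per unit of work; each is later fed to run_task
    for tile_index in [(15, -40), (16, -40)]:
        yield {'tile_index': tile_index, 'op': config['op']}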
@ui.pass_index(expect_initialised=False)
@click.option('--dry-run', is_flag=True, default=False)
@click.argument('trash_path', type=click.Path(exists=True, readable=True, writable=True))
def restore(index: Index, trash_path: str, dry_run: bool):
trash_base = Path(trash_path)
assert trash_base.exists()
    for trashed_nc in trash_base.rglob('L*.nc'):
        restorable_path = _should_restore(index, trashed_nc)
        if restorable_path:
            _LOG.info("trash.restore", trash_path=trashed_nc, original_path=restorable_path)
            if not dry_run:
                # rglob already yields Path objects, so rename directly
                trashed_nc.rename(restorable_path)
# Separate fragment: swap a freshly written temporary file into place,
# removing any stale copy of the target first.
if filename.exists():
    filename.unlink()
temp_filename.rename(filename)
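For context, the fragment above is the tail of a write-then-swap: output is first written under a temporary name and only moved over the target once complete. A self-contained sketch of that pattern, with illustrative names:

# Sketch of the write-then-swap pattern the fragment above finishes; the
# function name and the .tmp suffix are illustrative, not from the original app.
from pathlib import Path

def write_then_swap(filename: Path, data: bytes) -> None:
    temp_filename = filename.with_suffix(filename.suffix + '.tmp')
    temp_filename.write_bytes(data)   # write the new content under a scratch name
    if filename.exists():
        filename.unlink()             # drop the stale copy first
    temp_filename.rename(filename)    # move the finished file into place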
@click.group(name=APP_NAME, help='NCML creation utility')
@datacube.ui.click.version_option
def ncml_app():
pass
#: pylint: disable=invalid-name
command_options = datacube.ui.click.compose(
datacube.ui.click.config_option,
datacube.ui.click.pass_index(app_name=APP_NAME),
datacube.ui.click.logfile_option,
task_app.cell_index_option,
task_app.cell_index_list_option,
task_app.queue_size_option,
task_app.load_tasks_option,
task_app.save_tasks_option,
datacube.ui.click.executor_cli_options,
click.option('--export-path', 'export_path',
help='Write the stacked files to an external location instead of the location in the app config',
default=None,
type=click.Path(exists=True, writable=True, file_okay=False)),
)
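command_options bundles the shared CLI decorators into a single reusable decorator. A minimal sketch of what a compose helper of this kind can look like is below; this is an assumption about its behaviour, not datacube.ui.click's actual implementation.

# Assumed behaviour of a compose() helper: apply the given decorators as if
# they had been stacked with @ syntax.
def compose(*decorators):
    def composed(func):
        for decorate in reversed(decorators):  # innermost decorator is applied first
            func = decorate(func)
        return func
    return composed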
@ncml_app.command(short_help='Create an ncml file')
@command_options
@ui.pass_index()
def main(index, dry_run, ids):
exists = 0
missing = 0
removed = 0
missing_datasets = []
    for dataset_id in ids:
        # open a transaction on the index's (private) database connection
        with index._db.begin() as db:
            some_exist = False
            to_remove = []
            for uri in db.get_locations(dataset_id):
                local_path = utils.uri_to_local_path(uri)
                if local_path.exists():
                    exists += 1
                    some_exist = True
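The existence check above relies on datacube's uri_to_local_path helper, which turns a file:// URI into a pathlib.Path so it can be checked on disk. A quick illustration; the example URI is made up:

from datacube import utils

print(utils.uri_to_local_path('file:///tmp/example_dataset.nc'))
# -> PosixPath('/tmp/example_dataset.nc') on POSIX systems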
@ui.pass_index()
def update_cmd(index, keys_that_can_change, dry_run, location_policy, dataset_paths):
def loc_action(action, new_ds, existing_ds, action_name):
if len(existing_ds.uris) == 0:
return None
if len(existing_ds.uris) > 1:
_LOG.warning("Refusing to %s old location, there are several", action_name)
return None
new_uri, = new_ds.uris
old_uri, = existing_ds.uris
if new_uri == old_uri:
return None
        if dry_run:
            # assumed behaviour: in a dry run, report the change without applying it
            return True
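The location_policy ultimately maps to calls on the index's dataset-location API. The sketch below shows two obvious wirings; the policy behaviour and function names are assumptions, while add_location, archive_location and remove_location are real Index dataset methods.

# Sketch only: applying a changed location via the Index dataset API.
def apply_archive_policy(index, dataset_id, old_uri, new_uri):
    index.datasets.add_location(dataset_id, new_uri)       # register the new location
    index.datasets.archive_location(dataset_id, old_uri)   # keep the old one, marked archived

def apply_forget_policy(index, dataset_id, old_uri, new_uri):
    index.datasets.add_location(dataset_id, new_uri)
    index.datasets.remove_location(dataset_id, old_uri)    # drop the old location entirely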
@ui.pass_index(app_name='agdc-output_products')
def main(index, stats_config_file, executor):
app = create_stats_app(stats_config_file, index)
output_products = app.make_output_products(index)
# TODO: Store output products in database
completed_tasks = app.run(index, executor, output_products)
for ds in completed_tasks:
print('Completed: %s' % ds)
# index.datasets.add(ds, skip_sources=True) # index completed work
@ui.pass_index()
@click.argument('files',
type=click.Path(exists=True, readable=True),
nargs=-1)
def archived(index: Index,
dry_run: bool,
files: List[str],
min_trash_age_hours: int):
"""
    Clean up archived locations.
Find any locations within the given folders that have been archived in the index.
It will only trash locations that were archived more than min-trash-age-hours
ago (default: 3 days).
"""
total_count = 0
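The docstring's age rule (only trash locations archived more than min-trash-age-hours ago) boils down to a cutoff comparison. A minimal sketch, assuming the archive time is available as a timezone-aware datetime; the helper name and argument are assumptions, not the application's actual code.

from datetime import datetime, timedelta, timezone

def old_enough(archived_time: datetime, min_trash_age_hours: int = 72) -> bool:
    # archived_time is assumed to be timezone-aware (UTC); 72 hours matches the 3-day default
    cutoff = datetime.now(timezone.utc) - timedelta(hours=min_trash_age_hours)
    return archived_time <= cutoff   # only locations archived before the cutoff get trashed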
@ui.pass_index(app_name='agdc-moviemaker')
def sequencer(index, app_config, load_bounds_from, start_date, end_date, products, executor, expressions):
products = products or DEFAULT_PRODUCTS
_, config = next(read_documents(app_config))
jobname = 'foo'
    # default bounds used when no bounds file is supplied (see the fallback below)
    job = {
        'bottom': -1619354.555,
        'left': 1188490.47,
        'top': -1578182.723,
        'right': 1213611.437
    }
if load_bounds_from:
crs, (left, bottom, right, top) = bounds_from_file(load_bounds_from)
else:
left, right, top, bottom = job['left'], job['right'], job['top'], job['bottom']
from datetime import datetime
from dateutil.tz import tzutc

@ui.pass_index
def stack(index, executor, types):
if not types:
storage_types = index.storage.types.get_all()
else:
storage_types = [index.storage.types.get_by_name(name) for name in types]
tasks = []
for storage_type in storage_types:
# TODO: figure out start date - search ordered by time, get first, round down
start_date = datetime(1986, 1, 1, tzinfo=tzutc())
# TODO: figure out end date - now, round down
end_date = datetime(2016, 1, 1, tzinfo=tzutc())
tasks += list(_stack_storage_type(storage_type, start_date, end_date, index))
    stacked = executor.map(_do_stack, tasks)
    # pair each completed stacking result with the (storage_units, filename) task that produced it
    for (storage_units, filename), result in zip(tasks, (executor.result(s) for s in stacked)):
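The snippet relies on the executor contract used throughout these apps: map() submits work and returns handles, result() retrieves each outcome. A toy illustration of that contract follows; it is not datacube's executor implementation.

class ToySerialExecutor:
    def map(self, func, iterable):
        return [(func, item) for item in iterable]   # one handle per submitted task

    def result(self, future):
        func, item = future
        return func(item)                            # run the task and hand back its value

executor = ToySerialExecutor()
futures = executor.map(lambda x: x * x, [1, 2, 3])
print([executor.result(f) for f in futures])         # [1, 4, 9]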