# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
preserve_deleted_files=self._preserve_deleted_files)
_, b = self._boto_bucket()
old_cat_fp = self.file_prepper_wrapped('.s3sup.catalogue.csv')
old_f = b.Object(old_cat_fp.s3_path())
new_cat_fp = self.file_prepper_wrapped('.s3sup.cat')
new_f = b.Object(new_cat_fp.s3_path())
hndl, tmpp = tempfile.mkstemp()
os.close(hndl)
try:
new_f.download_file(tmpp)
remote_cat.from_sqlite(tmpp)
except botocore.exceptions.NoCredentialsError:
raise click.UsageError(
'Cannot find AWS credentials.\n -> Configure AWS credentials '
' using any method that the underlying boto3 library supports:'
'\n -> https://boto3.amazonaws.com/v1/documentation/'
'api/latest/guide/configuration.html')
except botocore.exceptions.ClientError:
if self.verbose:
click.echo(
('Could not find SQLite based remote catalogue on S3 '
'(expected at {0}).').format(new_cat_fp.s3_path()))
try:
old_f.download_file(tmpp)
remote_cat.from_csv(tmpp)
click.echo(click.style((
'WARNING: After the next s3sup push, do not attempt to '
'use older versions of s3sup (0.3.0 or below) with this '
'project, as they will no longer be able to read the '
def update_archiver(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, stream_id, bucket_name, use_existing_bucket, start_position, batch_rollover_size_in_mbs, batch_rollover_time_in_seconds, if_match):
    """Validate the stream id and assemble the request kwargs and archiver
    update payload for a stream-archiver update call.

    Raises:
        click.UsageError: if --stream-id is empty or only whitespace.
    """
    # Reject a stream id that is blank before doing any work.
    if isinstance(stream_id, six.string_types) and not stream_id.strip():
        raise click.UsageError('Parameter --stream-id cannot be whitespace or empty string')

    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    # Only include payload fields the caller actually supplied.
    details = {}
    optional_fields = (
        ('bucketName', bucket_name),
        ('useExistingBucket', use_existing_bucket),
        ('startPosition', start_position),
    )
    for field_name, field_value in optional_fields:
        if field_value is not None:
            details[field_name] = field_value
def update_analytics_instance(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, analytics_instance_id, description, email_notification, license_type, defined_tags, freeform_tags, if_match):
    """Update an Analytics instance: validate the id, confirm destructive tag
    replacement unless --force is given, then assemble the request kwargs and
    the update payload.

    NOTE(review): this block is truncated in the visible source — the trailing
    ``if email_notification is not None:`` has no body here.
    """
    # Guard: the instance id must be a non-empty, non-whitespace string.
    if isinstance(analytics_instance_id, six.string_types) and len(analytics_instance_id.strip()) == 0:
        raise click.UsageError('Parameter --analytics-instance-id cannot be whitespace or empty string')
    # Tag updates replace existing values wholesale, so require explicit
    # confirmation unless the caller passed --force.
    if not force:
        if defined_tags or freeform_tags:
            if not click.confirm("WARNING: Updates to defined-tags and freeform-tags will replace any existing values. Are you sure you want to continue?"):
                ctx.abort()
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    # Only send payload fields the caller supplied.
    details = {}
    if description is not None:
        details['description'] = description
    if email_notification is not None:
def update_db_system(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, db_system_id, cpu_core_count, data_storage_size_in_gbs, defined_tags, freeform_tags, ssh_public_keys, version, if_match):
    """Validate the DB system id, confirm destructive replacements unless
    --force is given, then assemble the request kwargs and update payload.

    Raises:
        click.UsageError: if --db-system-id is empty or only whitespace.
    """
    # Guard clause: a blank OCID is never valid.
    if isinstance(db_system_id, six.string_types) and not db_system_id.strip():
        raise click.UsageError('Parameter --db-system-id cannot be whitespace or empty string')

    # These fields overwrite any existing values, so ask before proceeding
    # unless the caller explicitly passed --force.
    if not force and (defined_tags or freeform_tags or ssh_public_keys or version):
        if not click.confirm("WARNING: Updates to defined-tags and freeform-tags and ssh-public-keys and version will replace any existing values. Are you sure you want to continue?"):
            ctx.abort()

    kwargs = {'if_match': if_match} if if_match is not None else {}

    # Only include payload fields the caller actually supplied.
    details = {}
    if cpu_core_count is not None:
        details['cpuCoreCount'] = cpu_core_count
    if data_storage_size_in_gbs is not None:
        details['dataStorageSizeInGBs'] = data_storage_size_in_gbs
def delete_cluster(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, cluster_id, if_match):
    """Delete a container-engine cluster, optionally waiting on the resulting
    work request to reach the requested state.

    NOTE(review): truncated in the visible source — the ``try`` block at the
    end has no matching ``except``/``finally`` here.
    """
    # Guard: the cluster id must be a non-empty, non-whitespace string.
    if isinstance(cluster_id, six.string_types) and len(cluster_id.strip()) == 0:
        raise click.UsageError('Parameter --cluster-id cannot be whitespace or empty string')
    kwargs = {}
    if if_match is not None:
        kwargs['if_match'] = if_match
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
    client = cli_util.build_client('container_engine', ctx)
    result = client.delete_cluster(
        cluster_id=cluster_id,
        **kwargs
    )
    # When asked to wait, poll through the work-request API — but only if this
    # client actually exposes get_work_request.
    if wait_for_state:
        if hasattr(client, 'get_work_request') and callable(getattr(client, 'get_work_request')):
            try:
                wait_period_kwargs = {}
                if max_wait_seconds is not None:
                    wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
def execute_stop_command(schedule_name, cli_args, print_fn):
    """Stop the named schedule for the repository described by *cli_args*,
    reporting progress through *print_fn*.

    NOTE(review): truncated in the visible source — the trailing ``except``
    clause has no body here.
    """
    # Scheduling commands require DAGSTER_HOME to be configured.
    if not _is_dagster_home_set():
        raise click.UsageError(dagster_home_error_message_for_command('dagster schedule stop ...'))
    handle = handle_for_repo_cli_args(cli_args)
    repository = handle.build_repository_definition()
    instance = DagsterInstance.get()
    schedule_handle = handle.build_scheduler_handle(artifacts_dir=instance.schedules_directory())
    # Bail out gracefully when the repository defines no scheduler.
    if not schedule_handle:
        print_fn("Scheduler not defined for repository {name}".format(name=repository.name))
        return
    scheduler = schedule_handle.get_scheduler()
    try:
        schedule = scheduler.stop_schedule(schedule_name)
    except DagsterInvariantViolationError as ex:
def list_resource_types(ctx, from_json, all_pages, page_size, limit, page):
    """List searchable resource types, optionally fetching every page.

    Raises:
        click.UsageError: if --all is combined with --limit.
    """
    # --all performs its own pagination, so an explicit --limit conflicts.
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')

    # Forward only the pagination options the caller supplied.
    kwargs = {}
    for option_name, option_value in (('limit', limit), ('page', page)):
        if option_value is not None:
            kwargs[option_name] = option_value
    kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])

    client = cli_util.build_client('resource_search', ctx)
    if all_pages:
        # When walking every page, --page-size becomes the per-request limit.
        if page_size:
            kwargs['limit'] = page_size
        result = cli_util.list_call_get_all_results(
            client.list_resource_types,
            **kwargs
        )
def list_db_system_patches(ctx, from_json, all_pages, page_size, db_system_id, limit, page):
    """List available patches for a DB system, with optional client-side
    pagination across all pages.

    NOTE(review): truncated in the visible source — the final call to
    ``cli_util.list_call_get_all_results`` is cut off mid-argument-list.
    """
    # --all fetches every page itself, so it conflicts with an explicit --limit.
    if all_pages and limit:
        raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
    # Guard: the DB system id must be a non-empty, non-whitespace string.
    if isinstance(db_system_id, six.string_types) and len(db_system_id.strip()) == 0:
        raise click.UsageError('Parameter --db-system-id cannot be whitespace or empty string')
    kwargs = {}
    if limit is not None:
        kwargs['limit'] = limit
    if page is not None:
        kwargs['page'] = page
    client = cli_util.build_client('database', ctx)
    if all_pages:
        # When walking every page, --page-size becomes the per-request limit.
        if page_size:
            kwargs['limit'] = page_size
        result = cli_util.list_call_get_all_results(
            client.list_db_system_patches,
            db_system_id=db_system_id,
def register(ctx, pkgs=None, test=None):
    """Run registration steps for the workflows in this container.

    Run with the --test switch for a dry run to see what will be registered.
    A default launch plan will also be created, if a role can be found in the
    environment variables.
    """
    # --pkgs moved to the top-level command line; reject it here with guidance.
    if pkgs:
        raise click.UsageError("--pkgs must now be specified before the 'register' keyword on the command line")
    # Stash the dry-run flag on the context for downstream subcommands.
    ctx.obj[CTX_TEST] = test
def multi_calendar_select(ctx, include_calendars, exclude_calendars):
    """Build a calendar selection from the -a (include) / -d (exclude)
    options; the two are mutually exclusive.

    Raises:
        click.UsageError: if both include and exclude options are given.
        click.BadParameter: if an included calendar is not configured.
    """
    if include_calendars and exclude_calendars:
        raise click.UsageError('Can\'t use both -a and -d.')
    selection = set()
    if include_calendars:
        configured = ctx.obj['conf']['calendars']
        # Every requested calendar must exist in the configuration.
        for name in include_calendars:
            if name not in configured:
                raise click.BadParameter(
                    'Unknown calendar {}, run `khal printcalendars` to get a '
                    'list of all configured calendars.'.format(name)
                )
        selection.update(include_calendars)