Flintrock will configure new slaves based on information queried
automatically from the master.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
provider_options = {
'min_root_ebs_size_gb': ec2_min_root_ebs_size_gb,
'spot_price': ec2_spot_price,
'tags': ec2_tags
}
else:
raise UnsupportedProviderError(provider)
if cluster.num_masters == 0:
# A cluster with no master cannot accept new slaves, since new slaves are
# configured from information queried off the master. The remainder of this
# error message is assumed from that check.
raise Error(
"Cannot add slaves to cluster '{c}' since it does not "
"appear to have a master."
.format(c=cluster_name))
Flintrock will return a non-zero code if any of the cluster nodes raises an error
while running the command.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.run_command_check()
logger.info("Running command on {target}...".format(
target="master only" if master_only else "cluster"))
# user and identity_file are prepared above; the trailing keyword arguments
# below are assumed to forward them along with the command.
cluster.run_command(
command=command,
master_only=master_only,
user=user,
identity_file=identity_file)
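
# Illustrative CLI invocation (cluster name and command are made up); the
# master_only parameter above restricts the command to the master node instead
# of the whole cluster:
#
#     flintrock run-command my-cluster 'sudo yum install -y tree'
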
def destroy(cli_context, cluster_name, assume_yes, ec2_region, ec2_vpc_id):
"""
Destroy a cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to destroy this cluster?",
abort=True)
logger.info("Destroying {c}...".format(c=cluster.name))
cluster.destroy()
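
# click.confirm(..., abort=True), as used above and in `stop` below, raises
# click.Abort when the user declines, so the command exits before the
# destructive call ever runs. A standalone illustration of the same pattern
# (the command name and messages here are made up):
import click


@click.command()
def nuke():
    click.confirm("Are you sure you want to destroy this cluster?", abort=True)
    click.echo("Destroying...")
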
"""
Start an existing, stopped cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.start_check()
logger.info("Starting {c}...".format(c=cluster_name))
cluster.start(user=user, identity_file=identity_file)
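
# Each of these commands resolves the target cluster via ec2.get_cluster().
# A rough sketch of how such a lookup could work with boto3, assuming instances
# carry a tag identifying their cluster; the tag name is illustrative and this
# is not Flintrock's actual implementation:
import boto3


def _find_cluster_instances_sketch(cluster_name, region, vpc_id=None):
    ec2_client = boto3.client('ec2', region_name=region)
    filters = [{'Name': 'tag:flintrock-cluster', 'Values': [cluster_name]}]
    if vpc_id:
        filters.append({'Name': 'vpc-id', 'Values': [vpc_id]})
    response = ec2_client.describe_instances(Filters=filters)
    return [
        instance
        for reservation in response['Reservations']
        for instance in reservation['Instances']]
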
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
# We assume POSIX for the remote path since Flintrock
# only supports clusters running CentOS / Amazon Linux.
if not posixpath.basename(remote_path):
remote_path = posixpath.join(remote_path, os.path.basename(local_path))
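# For example, posixpath.basename('/srv/data/') == '', so copying a local file
# 'jobs.tar.gz' to the remote directory '/srv/data/' resolves the destination
# to '/srv/data/jobs.tar.gz'. (Example paths are illustrative.)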
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
cluster.copy_file_check()
if not assume_yes and not master_only:
file_size_bytes = os.path.getsize(local_path)
num_nodes = len(cluster.slave_ips) + 1 # TODO: cluster.num_nodes
total_size_bytes = file_size_bytes * num_nodes
if total_size_bytes > 10 ** 6:
# The body of this branch is a minimal sketch, assuming the same
# confirm-before-proceeding pattern used by destroy above and stop below.
logger.warning(
"About to copy {b} bytes in total across {n} nodes."
.format(b=total_size_bytes, n=num_nodes))
click.confirm(
text="Are you sure you want to continue?",
abort=True)
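# Rough threshold math: copying a 300 KB file to a 4-node cluster (1 master +
# 3 slaves) moves 4 * 300,000 = 1,200,000 bytes in total, which exceeds the
# 10 ** 6 byte cutoff checked above.
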
def stop(cli_context, cluster_name, ec2_region, ec2_vpc_id, assume_yes):
"""
Stop an existing, running cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=['--ec2-region'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
else:
raise UnsupportedProviderError(provider)
cluster.stop_check()
if not assume_yes:
cluster.print()
click.confirm(
text="Are you sure you want to stop this cluster?",
abort=True)
logger.info("Stopping {c}...".format(c=cluster_name))
cluster.stop()
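
# On EC2, stopping a cluster comes down to stopping its instances; EBS-backed
# root volumes survive a stop/start cycle, so the cluster can be started again
# later. A minimal boto3 sketch of that step (not Flintrock's actual code; the
# caller supplies the instance IDs):
import boto3


def _stop_instances_sketch(instance_ids, region):
    ec2_client = boto3.client('ec2', region_name=region)
    ec2_client.stop_instances(InstanceIds=instance_ids)
    # Block until every instance reports the 'stopped' state.
    waiter = ec2_client.get_waiter('instance_stopped')
    waiter.wait(InstanceIds=instance_ids)
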
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-identity-file',
'--ec2-user'],
scope=locals())
check_external_dependency('ssh')
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
# TODO: Check that master up first and error out cleanly if not
# via ClusterInvalidState.
cluster.login(user=user, identity_file=identity_file)
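
# check_external_dependency('ssh'), used in the login command above, guards
# against a missing system SSH client. A minimal sketch of such a check based
# on a PATH lookup; the real Flintrock helper may differ:
import shutil


def _check_external_dependency_sketch(executable_name):
    if shutil.which(executable_name) is None:
        raise Exception(
            "Required executable not found on PATH: {name}"
            .format(name=executable_name))
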
"""
Remove slaves from an existing cluster.
"""
provider = cli_context.obj['provider']
option_requires(
option='--provider',
conditional_value='ec2',
requires_all=[
'--ec2-region',
'--ec2-user',
'--ec2-identity-file'],
scope=locals())
if provider == 'ec2':
cluster = ec2.get_cluster(
cluster_name=cluster_name,
region=ec2_region,
vpc_id=ec2_vpc_id)
user = ec2_user
identity_file = ec2_identity_file
else:
raise UnsupportedProviderError(provider)
if num_slaves > cluster.num_slaves:
logger.warning(
"Warning: Cluster has {c} slave{cs}. "
"You asked to remove {n} slave{ns}."
.format(
c=cluster.num_slaves,
cs='' if cluster.num_slaves == 1 else 's',
n=num_slaves,
ns='' if num_slaves == 1 else 's'))
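
# Illustrative CLI invocation of this command (the cluster name is made up):
#
#     flintrock remove-slaves my-cluster --num-slaves 1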