@app.command_option('--limit', dest='limit', default=None, type=int,
help='Limit the number of total tasks to prune.')
def prune_tasks(args, options):
if len(args) == 0:
die('Must specify at least a cluster.')
cluster = args[0]
t = TaskQuery()
if options.states:
t.statuses = set(map(ScheduleStatus._NAMES_TO_VALUES.get, options.states.split(',')))
if options.role:
t.role = options.role
if options.environment:
t.environment = options.environment
if options.limit:
t.limit = options.limit
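# Illustration of the --states handling above (status names assumed): thrift
# generates ScheduleStatus._NAMES_TO_VALUES as a name-to-value map, so
#   set(map(ScheduleStatus._NAMES_TO_VALUES.get, 'RUNNING,FINISHED'.split(',')))
# yields {ScheduleStatus.RUNNING, ScheduleStatus.FINISHED} for the query.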
@app.command_option(DEFAULT_SLA_PERCENTAGE_OPTION)
@app.command_option(DEFAULT_SLA_DURATION_OPTION)
@requires.exactly('cluster')
def sla_host_drain(cluster):
"""usage: sla_host_drain {--filename=filename | --hosts=hosts}
[--default_percentage=percentage]
[--default_duration=duration]
[--force_drain_timeout=timeout]
cluster
Asks the scheduler to drain the list of provided hosts in an SLA-aware manner.
The hosts are drained and placed in a DRAINED maintenance state. This kills
off any tasks currently running on these hosts and prevents future tasks
from being scheduled on them while they are drained.
The hosts are left in maintenance mode upon completion. Use host_activate to
return them to production and allow tasks to be scheduled on them again.
"""
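# A note on the SLA options above (interpretation inferred from the option names,
# not stated in this snippet): --default_percentage and --default_duration supply
# an uptime SLA (the share of a job's instances that must stay up over the given
# duration) for jobs that do not define their own, and the drain only proceeds
# where it would not violate that SLA.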
@app.command_option('--states', dest='states', default='RUNNING',
help='Only match tasks with given state(s).')
@app.command_option('-l', '--listformat', dest='listformat',
default="%role%/%jobName%/%shardId% %status%",
help='Format string of job/task items to print out.')
# TODO(ksweeney): Allow query by environment here.
def query(args, options):
"""usage: query --cluster=CLUSTER [--shards=N[,N,...]]
[--states=State[,State,...]]
[role [job]]
Query Mesos about jobs and tasks.
"""
def _convert_fmt_string(fmtstr):
  import re
  def convert(match):
    # Rewrite a '%field%' token as a Python '%(field)s' placeholder.
    return "%%(%s)s" % match.group(1)
  return re.sub(r'%(\w+)%', convert, fmtstr)
@app.command_option(CLUSTER_OPTION)
@requires.nothing
def scheduler_unload_recovery():
"""usage: scheduler_unload_recovery --cluster=CLUSTER
Unloads a staged recovery.
"""
options = app.get_options()
check_and_log_response(MesosClientAPI(options.cluster, options.verbosity)
.unload_recovery())
@app.command_option(CLUSTER_INVOKE_OPTION)
@requires.exactly('role', 'package', 'version')
def package_set_live(role, package, version):
"""usage: package_set_live --cluster=CLUSTER role package version
Updates the 'live' label of a package to point to a specific version.
"""
_get_packer().set_live(role, package, version)
print('Version %s is now the LIVE version' % version)
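# Hypothetical invocation of the command above (cluster, role, package and version
# values are invented for illustration):
#   package_set_live --cluster=example-cluster some-role some-package 3
# which calls _get_packer().set_live('some-role', 'some-package', '3') and then
# reports that version 3 is now the LIVE version.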
@app.command_option('--force', dest='force', default=False, action='store_true',
help='Force expensive queries to run.')
@app.command_option('--shards', dest='shards', default=None,
help='Only match given shards of a job.')
@app.command_option('--states', dest='states', default='RUNNING',
help='Only match tasks with given state(s).')
@app.command_option('-l', '--listformat', dest='listformat',
default="%role%/%name%/%instanceId% %status%",
help='Format string of job/task items to print out.')
# TODO(ksweeney): Allow query by environment here.
def query(args, options):
"""usage: query [--force]
[--listformat=FORMAT]
[--shards=N[,N,...]]
[--states=State[,State,...]]
cluster [role [job]]
Query Mesos about jobs and tasks.
"""
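# A line produced with the default --listformat above would look like this
# (task values invented for illustration):
#   www-data/hello/0 RUNNING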
@app.command_option(CLUSTER_OPTION)
@app.command_option('--filename', dest='filename', default=None,
help='Name of the file with hostnames')
@app.command_option('--hosts', dest='hosts', default=None,
help='Comma separated list of hosts')
@app.command_option('--batch_size', dest='batch_size', default=0,
help='Number of machines to operate on.')
@app.command_option('--post_drain_script', dest='post_drain_script', default=None,
help='Path to a script to run for each host.')
def perform_maintenance_hosts():
"""usage: perform_maintenance --cluster=CLUSTER [--filename=filename]
[--hosts=hosts]
[--batch_size=num]
[--post_drain_script=path]
Asks the scheduler to remove any running tasks from the given machines, take them
out of service temporarily, perform some action on them, and then return the
machines to service.
"""
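def _example_parse_hosts(filename=None, hosts=None):
  # Hypothetical helper, not part of the original source: one way the two mutually
  # exclusive host sources used by the maintenance commands above could be merged
  # into a single list. Assumes the file holds one hostname per line.
  result = []
  if filename:
    with open(filename) as f:
      result.extend(line.strip() for line in f if line.strip())
  if hosts:
    result.extend(hosts.split(','))
  return result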
@app.command_option('--raw', dest='raw', default=False, action='store_true',
help='Show the raw configuration.')
@app.command_option(ENVIRONMENT_BIND_OPTION)
@app.command_option(CLUSTER_CONFIG_OPTION)
@app.command_option(ENV_CONFIG_OPTION)
@app.command_option(JSON_OPTION)
@requires.exactly('cluster/role/env/job', 'config')
def inspect(job_spec, config_file):
"""usage: inspect cluster/role/env/job config
Verifies that a job can be parsed from a configuration file, and displays
the parsed configuration.
"""
options = app.get_options()
newcmd = ["job", "inspect", job_spec, config_file]
if options.json:
newcmd.append("--read-json")
@app.command_option(UNSAFE_SLA_HOSTS_FILE_OPTION)
@requires.exactly('cluster')
def host_drain(cluster):
"""usage: host_drain {--filename=filename | --hosts=hosts}
[--post_drain_script=path]
[--grouping=function]
[--override_percentage=percentage]
[--override_duration=duration]
[--override_reason=reason]
[--unsafe_hosts_file=unsafe_hosts_filename]
cluster
Asks the scheduler to start maintenance on the list of provided hosts (see host_deactivate
for more details) and drains any active tasks on them.
The hosts are drained and placed in a DRAINED maintenance state. This kills
off any tasks currently running on these hosts and prevents future
tasks from being scheduled on them while they are drained.
@app.command_option(JSON_OPTION)
@app.command_option(HEALTH_CHECK_INTERVAL_SECONDS_OPTION)
@app.command_option(
'--force',
dest='force',
default=True, # TODO(maximk): Temporary bandaid for MESOS-4310 until a better fix is available.
action='store_true',
help='Turn off warning message that the update looks large enough to be disruptive.')
@requires.exactly('cluster/role/env/job', 'config')
def update(job_spec, config_file):
"""usage: update cluster/role/env/job config
Performs a rolling upgrade on a running job, using the update configuration
within the config file as a control for update velocity and failure tolerance.
Updates are fully controlled client-side, so aborting an update halts the
update and leaves the job in a 'locked' state on the scheduler.
"""
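# Conceptual sketch only (helper names invented; this is not Aurora's updater
# implementation): a client-side rolling update as described above replaces
# instances in small batches, watches each batch for health, and stops once the
# failure tolerance from the update configuration is exceeded.
def _example_rolling_update(instance_ids, batch_size, restart_batch, batch_healthy,
                            max_failed_batches):
  failed = 0
  for start in range(0, len(instance_ids), batch_size):
    batch = instance_ids[start:start + batch_size]
    restart_batch(batch)          # push the new configuration to this batch
    if not batch_healthy(batch):  # e.g. poll every health_check_interval_seconds
      failed += 1
      if failed > max_failed_batches:
        return False              # caller would roll back and leave the job locked
  return True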