# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
extra_args = environ.get("BW_SSH_ARGS", "").strip()
if extra_args:
ssh_command.extend(split(extra_args))
ssh_command.append(hostname)
ssh_command.append(wrapper_outer.format(quote(wrapper_inner.format(command))))
cmd_id = randstr(length=4).upper()
io.debug("running command with ID {}: {}".format(cmd_id, " ".join(ssh_command)))
ssh_process = Popen(
ssh_command,
preexec_fn=setpgrp,
stdin=PIPE,
stderr=stderr_fd_w,
stdout=stdout_fd_w,
)
io._ssh_pids.append(ssh_process.pid)
quit_event = Event()
stdout_thread = Thread(
args=(stdout_lb, stdout_fd_r, quit_event, True),
target=output_thread_body,
)
stderr_thread = Thread(
args=(stderr_lb, stderr_fd_r, quit_event, False),
target=output_thread_body,
)
stdout_thread.start()
stderr_thread.start()
try:
ssh_process.communicate()
finally:
def bw_hash(repo, args):
if args['group_membership'] and args['metadata']:
io.stdout(_(
"{x} Cannot hash group membership and metadata at the same time").format(x=red("!!!")
))
exit(1)
if args['group_membership'] and args['item']:
io.stdout(_("{x} Cannot hash group membership for an item").format(x=red("!!!")))
exit(1)
if args['item'] and args['metadata']:
io.stdout(_("{x} Items don't have metadata").format(x=red("!!!")))
exit(1)
if args['node_or_group']:
try:
target = repo.get_node(args['node_or_group'])
target_type = 'node'
except NoSuchNode:
try:
def upload(
hostname,
local_path,
remote_path,
add_host_keys=False,
group="",
mode=None,
owner="",
wrapper_inner="{}",
wrapper_outer="{}",
):
"""
Upload a file.
"""
io.debug(_("uploading {path} -> {host}:{target}").format(
host=hostname, path=local_path, target=remote_path))
temp_filename = ".bundlewrap_tmp_" + randstr()
scp_process = Popen(
[
"scp",
"-o",
"StrictHostKeyChecking=no" if add_host_keys else "StrictHostKeyChecking=yes",
local_path,
"{}:{}".format(hostname, temp_filename),
],
preexec_fn=setpgrp,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
)
"a required key in your .secrets.cfg)"
).format(
attrs=", ".join(sorted(self._faults_missing_for_attributes)),
item=self.id,
node=self.node.name,
))
return (self.STATUS_SKIPPED, self.SKIP_REASON_FAULT_UNAVAILABLE)
if not self.covered_by_autoonly_selector(autoonly_selector):
io.debug(_(
"autoonly does not match {item} on {node}"
).format(item=self.id, node=self.node.name))
return (self.STATUS_SKIPPED, self.SKIP_REASON_CMDLINE)
if self.covered_by_autoskip_selector(autoskip_selector):
io.debug(_(
"autoskip matches {item} on {node}"
).format(item=self.id, node=self.node.name))
return (self.STATUS_SKIPPED, self.SKIP_REASON_CMDLINE)
if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks):
return (self.STATUS_SKIPPED, self.SKIP_REASON_SOFTLOCK)
if interactive is False and self.attributes['interactive'] is True:
return (self.STATUS_SKIPPED, self.SKIP_REASON_INTERACTIVE_ONLY)
for item in self._precedes_items:
if item._triggers_preceding_items(interactive=interactive):
io.debug(_(
"preceding item {item} on {node} has been triggered by {other_item}"
).format(item=self.id, node=self.node.name, other_item=item.id))
self.has_been_triggered = True
def bw_verify(repo, args):
errors = []
node_stats = {}
pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
start_time = datetime.now()
io.progress_set_total(count_items(pending_nodes))
def tasks_available():
    # There is work left as long as any node is still awaiting verification.
    return len(pending_nodes) > 0
def next_task():
    # Pull the next node off the queue and describe the verify job for it.
    node = pending_nodes.pop()
    verify_kwargs = {
        'show_all': args['show_all'],
        'workers': args['item_workers'],
    }
    return {
        'target': node.verify,
        'task_id': node.name,
        'kwargs': verify_kwargs,
    }
def handle_result(task_id, return_value, duration):
self.__reset()
self.__nodes_that_never_ran.add(initial_node_name)
iterations = 0
while not QUIT_EVENT.is_set():
iterations += 1
if iterations > MAX_METADATA_ITERATIONS:
assert False
# TODO
#reactors = ""
#for node, reactor in sorted(reactors_that_changed_something_in_last_iteration):
# reactors += node + " " + reactor + "\n"
#raise ValueError(_(
# "Infinite loop detected between these metadata reactors:\n"
#) + reactors)
io.debug(f"metadata iteration #{iterations}")
jobmsg = _("{b} (iteration {i}, {nodes} nodes)").format(
b=bold(_("running metadata reactors")),
i=iterations,
nodes=len(self.__nodes_that_never_ran) + len(self.__nodes_that_ran_at_least_once),
)
with io.job(jobmsg):
try:
node_name = self.__nodes_that_never_ran.pop()
except KeyError:
pass
else:
self.__nodes_that_ran_at_least_once.add(node_name)
self.__initial_run_for_node(node_name)
continue
@io.job_wrapper(_("{} {} parsing bundle").format(bold("{0.node.name}"), bold("{0.name}")))
def bundle_attrs(self):
if not exists(self.bundle_file):
return {}
else:
return get_all_attrs_from_file(
self.bundle_file,
base_env={
'node': self.node,
'repo': self.repo,
},
def log_error(run_result):
    """
    Write stdout and stderr of a failed command run to the debug log.

    run_result is expected to carry .return_code plus raw .stdout/.stderr
    bytes; nothing is logged for successful runs (return_code == 0).
    """
    if run_result.return_code != 0:
        # Output is captured as bytes; decode before handing it to the logger.
        io.debug(run_result.stdout.decode('utf-8'))
        io.debug(run_result.stderr.decode('utf-8'))