# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): incomplete fragment — the enclosing `def` is not visible and
# indentation has been stripped, so this span is not runnable as-is. It appears
# to build a container runtime argument list (`runtime`) and arrange for the
# container engine to write a container-ID file — confirm against the full file.
runtime.append("--env=HOME=%s" % self.builder.outdir)
cidfile_path = None # type: Optional[str]
# add parameters to docker to write a container ID file
if runtimeContext.user_space_docker_cmd is None:
if runtimeContext.cidfile_dir:
cidfile_dir = runtimeContext.cidfile_dir
# Hard-exit with status 2 when the user-supplied cidfile directory is
# missing or is not a directory.
if not os.path.exists(str(cidfile_dir)):
_logger.error(
"--cidfile-dir %s error:\n%s",
cidfile_dir,
"directory doesn't exist, please create it first",
)
exit(2)
if not os.path.isdir(cidfile_dir):
_logger.error(
"--cidfile-dir %s error:\n%s",
cidfile_dir,
cidfile_dir + " is not a directory, " "please check it first",
)
exit(2)
else:
# No directory given: create a temp dir derived from tmpdir_prefix.
tmp_dir, tmp_prefix = os.path.split(runtimeContext.tmpdir_prefix)
cidfile_dir = tempfile.mkdtemp(prefix=tmp_prefix, dir=tmp_dir)
# Timestamped cid file name, optionally prefixed by --cidfile-prefix.
cidfile_name = datetime.datetime.now().strftime("%Y%m%d%H%M%S-%f") + ".cid"
if runtimeContext.cidfile_prefix is not None:
cidfile_name = str(runtimeContext.cidfile_prefix + "-" + cidfile_name)
cidfile_path = os.path.join(cidfile_dir, cidfile_name)
runtime.append("--cidfile=%s" % cidfile_path)
# Forward the builder environment into the container.
for key, value in self.environment.items():
runtime.append("--env=%s=%s" % (key, value))
# NOTE(review): stray closing paren below — the source jumps to a different
# fragment here; the statement it closes is not visible in this chunk.
)
# NOTE(review): incomplete fragment of job-order initialization (enclosing
# function not visible; indentation stripped). Applies tool input defaults,
# errors out when no input object can be built, and optionally prints input
# dependencies.
for inp in process.tool["inputs"]:
# Fill in a tool-declared default for any input absent from the job order.
if "default" in inp and (
not job_order_object or shortname(inp["id"]) not in job_order_object
):
if not job_order_object:
job_order_object = {}
job_order_object[shortname(inp["id"])] = inp["default"]
if job_order_object is None:
if process.tool["inputs"]:
if toolparser is not None:
print("\nOptions for {} ".format(args.workflow))
toolparser.print_help()
_logger.error("")
_logger.error("Input object required, use --help for details")
# Exit code 1: tool has inputs but no input object was provided.
exit(1)
else:
# Tool takes no inputs, so an empty job order is valid.
job_order_object = {}
if print_input_deps:
basedir = None # type: Optional[str]
uri = cast(str, job_order_object["id"])
if uri == args.workflow:
basedir = os.path.dirname(uri)
uri = ""
# NOTE(review): the printdeps(...) call below is truncated mid-argument-list
# by the end of this fragment.
printdeps(
job_order_object,
loader,
stdout,
relative_deps,
# NOTE(review): incomplete fragment — the first two lines are the tail of a
# method signature whose `def` line is outside this chunk; the final `if` is
# truncated at the fragment boundary. Appears to queue a job and then admit
# pending jobs subject to ram/core limits — confirm against the full file.
self, job: Optional[JobsType], runtime_context: RuntimeContext,
) -> None:
"""Execute a single Job in a separate thread."""
if job is not None:
with self.pending_jobs_lock:
self.pending_jobs.append(job)
with self.pending_jobs_lock:
n = 0
while (n + 1) <= len(self.pending_jobs):
job = self.pending_jobs[n]
if isinstance(job, JobBase):
# Reject jobs whose own request exceeds the host maximums outright.
if (job.builder.resources["ram"]) > self.max_ram or (
job.builder.resources["cores"]
) > self.max_cores:
# NOTE(review): bug — the format string has 4 placeholders
# (%s, %s, %d, %d) but 6 arguments are passed, and the message
# is missing a closing ")" after "max cores %d". logging will
# report a formatting error for this record.
_logger.error(
'Job "%s" cannot be run, requests more resources (%s) '
"than available on this host (max ram %d, max cores %d",
job.name,
job.builder.resources,
self.allocated_ram,
self.allocated_cores,
self.max_ram,
self.max_cores,
)
self.pending_jobs.remove(job)
return
# NOTE(review): truncated here — the condition below is cut off by the
# end of this fragment.
if (
(self.allocated_ram + job.builder.resources["ram"])
> self.max_ram
or (self.allocated_cores + job.builder.resources["cores"])
# NOTE(review): incomplete fragment cut at both ends — the first line is the
# tail of a cast(...) call and the last `if` is truncated. Appears to build
# output avro schema objects and optionally validate JavaScript expressions in
# the tool document — confirm against the full file.
CWLObjectType, make_valid_avro(self.outputs_record_schema, {}, set()),
)
make_avsc_object(convert_to_dict(self.outputs_record_schema), self.names)
# JS expression validation runs unless explicitly disabled via the
# loading context.
if toolpath_object.get("class") is not None and not getdefault(
loadingContext.disable_js_validation, False
):
validate_js_options = (
None
) # type: Optional[Dict[str, Union[List[str], str, int]]]
if loadingContext.js_hint_options_file is not None:
try:
with open(loadingContext.js_hint_options_file) as options_file:
validate_js_options = json.load(options_file)
except (OSError, ValueError):
# Log which options file failed, then re-raise for the caller.
_logger.error(
"Failed to read options file %s",
loadingContext.js_hint_options_file,
)
raise
if self.doc_schema is not None:
validate_js_expressions(
toolpath_object,
self.doc_schema.names[toolpath_object["class"]],
validate_js_options,
)
# NOTE(review): the condition below is truncated by the end of this fragment.
dockerReq, is_req = self.get_requirement("DockerRequirement")
if (
dockerReq is not None
and "dockerOutputDirectory" in dockerReq
# NOTE(review): incomplete fragment — enclosing function signature not visible.
# Appears to extract a subgraph of a Workflow for the requested --target step
# names and rebuild the tool from the extracted document.
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
# The tool id already carries a fragment: targets are joined with "/".
extracted = get_subgraph(
[tool.tool["id"] + "/" + r for r in args.target], tool
)
else:
# No fragment: resolve each target relative to the document id.
extracted = get_subgraph(
[
loadingContext.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target
],
tool,
)
else:
# --target is only meaningful for Workflows; signal failure with None.
_logger.error("Can only use --target on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
# Register the extracted document so make_tool can resolve it by id.
loadingContext.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
return tool
# NOTE(review): incomplete fragment of a CLI entry point (enclosing function
# not visible; indentation stripped). Handles early-exit informational flags
# and applies global option side effects before document loading.
configure_logging(args, stderr_handler, runtimeContext)
if args.version:
# Print version and exit successfully without running anything.
print(versionfunc())
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)))
return 0
if not args.workflow:
# Fall back to a file literally named "CWLFile" in the working directory.
if os.path.isfile("CWLFile"):
setattr(args, "workflow", "CWLFile")
else:
_logger.error("CWL document required, no input file was provided")
arg_parser().print_help()
return 1
if args.relax_path_checks:
command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE
if args.ga4gh_tool_registries:
# Replace the registry list in place so other references see the change.
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
# NOTE(review): incomplete fragment — the `if args.provenance:` guard and the
# setup_provenance definition appear to come from different places in the
# original file (the def body is truncated at the end of this chunk).
if args.provenance:
def setup_provenance(
args: argparse.Namespace, argsl: List[str], runtimeContext: RuntimeContext,
) -> Optional[int]:
# Provenance capture requires checksums; refuse the combination.
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
return 1
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
temp_prefix_ro=args.tmpdir_prefix,
orcid=args.orcid,
full_name=args.cwl_full_name,
)
runtimeContext.research_obj = ro
# Mirror tool logging into the research object's activity log.
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(cast(IO[str], log_file_io))
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug("[provenance] Logging to %s", log_file_io)
# NOTE(review): body truncated here by the fragment boundary.
if argsl is not None:
# Log cwltool command line options to provenance file
# NOTE(review): incomplete fragment — the `except` clauses below have no
# visible matching `try`, and the source jumps mid-handler in two places.
# Appears to write the final output object and map failures to exit codes.
if isinstance(out, str):
stdout.write(out)
else:
stdout.write(json_dumps(out, indent=4, ensure_ascii=False))
stdout.write("\n")
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
# Exit code 1: the input object failed schema validation.
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
# Exit code 33: document uses a feature this runner does not support.
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
# NOTE(review): this handler is truncated — the _logger.error call is cut off
# and the source jumps to an unrelated span at the next line.
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
if "name" in tool.tool:
del tool.tool["name"]
stdout.write(
json_dumps(
tool.tool, indent=4, sort_keys=True, separators=(",", ": ")
)
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
# Catch-all: report load failure; detail suppressed unless --debug is set.
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool