def _get_details(in_iqms, modality):
    in_prov = in_iqms.pop("provenance", {})
    warn_dict = in_prov.pop("warnings", None)
    sett_dict = in_prov.pop("settings", None) or {}  # guard: settings may be absent

    wf_details = []
    if modality == "bold":
        bold_exclude_index = in_iqms.get("dumb_trs")
        if bold_exclude_index is None:
            config.loggers.cli.warning(
                "Building bold report: no exclude index was found"
            )
        elif bold_exclude_index > 0:
            msg = """\
<span class="problematic">Non-steady state (strong T1 contrast) has been detected in the \
first {} volumes</span>. They were excluded before generating any QC measures and plots."""
            wf_details.append(msg.format(bold_exclude_index))

        wf_details.append(
            "Framewise Displacement was computed using <code>3dvolreg</code> (AFNI)"
        )
        fd_thres = sett_dict.pop("fd_thres", None)  # guard: key may be missing
        if fd_thres is not None:
            wf_details.append(
                "Framewise Displacement threshold was defined at %f mm" % fd_thres
            )
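
# A minimal, hypothetical illustration of the IQM dict this helper consumes
# (field names follow the fragment above; values are made up):
example_iqms = {
    "dumb_trs": 3,  # number of non-steady-state volumes detected
    "provenance": {
        "settings": {"fd_thres": 0.2},
        "warnings": {},
    },
}
# _get_details(example_iqms, "bold") would note the 3 excluded volumes and
# the 0.2 mm FD threshold in its workflow-details list.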
# `loadyml` is not defined in this fragment; an import along these lines is
# assumed (PyYAML):
from yaml import safe_load as loadyml

with open(opts.use_plugin) as f:
    plugin_settings = loadyml(f)
_plugin = plugin_settings.get("plugin")
if _plugin:
    config.nipype.plugin = _plugin
    config.nipype.plugin_args = plugin_settings.get("plugin_args", {})
    config.nipype.nprocs = config.nipype.plugin_args.get(
        "nprocs", config.nipype.nprocs
    )

# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
if 1 < config.nipype.nprocs < config.nipype.omp_nthreads:
    config.loggers.cli.warning(
        "Per-process threads (--omp-nthreads=%d) exceed total "
        "threads (--nthreads/--n_cpus=%d)",
        config.nipype.omp_nthreads,
        config.nipype.nprocs,
    )
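
# For illustration: a plugin-settings YAML consumed above might look like
# (hypothetical contents; key names mirror the lookups in this fragment):
#
#   plugin: MultiProc
#   plugin_args:
#     nprocs: 8
#
# which loadyml() turns into:
example_plugin_settings = {"plugin": "MultiProc", "plugin_args": {"nprocs": 8}}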
bids_dir = config.execution.bids_dir
output_dir = config.execution.output_dir
work_dir = config.execution.work_dir
version = config.environment.version

# Ensure input and output folders are not the same
if output_dir == bids_dir:
    parser.error(
        "The selected output folder is the same as the input BIDS folder. "
        "Please modify the output path (suggestion: %s)."
        % (bids_dir / "derivatives" / ("mriqc-%s" % version.split("+")[0]))
    )
    id_labels = list(set(def_comps) & set(dataframe.columns.ravel().tolist()))
    dataframe["label"] = dataframe[id_labels].apply(
        _format_labels, args=(id_labels,), axis=1
    )
else:
    dataframe = pd.read_csv(
        csv_file, index_col=False, sep="\t", dtype={"bids_name": object}
    )
    dataframe = dataframe.rename(index=str, columns={"bids_name": "label"})

nPart = len(dataframe)

failed = None
if csv_failed is not None and op.isfile(csv_failed):
    config.loggers.cli.warning(f'Found failed-workflows table "{csv_failed}"')
    failed_df = pd.read_csv(csv_failed, index_col=False)
    cols = list(set(id_labels) & set(failed_df.columns.ravel().tolist()))
    try:
        failed_df = failed_df.sort_values(by=cols)
    except AttributeError:
        # very old pandas (< 0.17) has no sort_values(); fall back to legacy API
        failed_df = failed_df.sort(columns=cols)
    # myfmt not defined
    # failed = failed_df[cols].apply(myfmt, args=(cols,), axis=1).ravel().tolist()

csv_groups = []
datacols = dataframe.columns.ravel().tolist()
for group, units in QCGROUPS[mod]:
    dfdict = {"iqm": [], "value": [], "label": [], "units": []}
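
# Self-contained illustration of the label-column intersection used above
# (column names here are invented; in MRIQC, def_comps holds BIDS entity keys):
import pandas as pd

def_comps = ["subject_id", "session_id", "run_id"]
df = pd.DataFrame({"subject_id": ["01", "02"], "run_id": [1, 2], "cjv": [0.4, 0.5]})
id_labels = list(set(def_comps) & set(df.columns.ravel().tolist()))
# id_labels contains 'subject_id' and 'run_id' (order depends on set iteration)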
mriqc_wf.write_graph(graph2use="colored", format="svg", simple_form=True)

# Clean up master process before running workflow, which may create forks
gc.collect()
if not config.execution.dry_run:
    # Warn about submitting measures BEFORE
    if not config.execution.no_sub:
        config.loggers.cli.warning(config.DSA_MESSAGE)

    # run MRIQC
    mriqc_wf.run(**config.nipype.get_plugin())

    # Warn about submitting measures AFTER
    if not config.execution.no_sub:
        config.loggers.cli.warning(config.DSA_MESSAGE)
config.loggers.cli.log(25, "Participant level finished successfully.")

# Set up group level
if "group" in config.workflow.analysis_level:
    from ..utils.bids import DEFAULT_TYPES
    from ..reports import group_html
    from ..utils.misc import generate_tsv  # , generate_pred

    config.loggers.cli.info("Group level started...")

    # Generate reports
    mod_group_reports = []
    for mod in config.execution.modalities or DEFAULT_TYPES:
        output_dir = config.execution.output_dir
        dataframe, out_tsv = generate_tsv(output_dir, mod)
        # If there are no iqm.json files, nothing to do.
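        # A guard in this spirit presumably follows (a sketch; the fragment
        # is cut off here):
        if dataframe is None:
            continue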
import gc
from multiprocessing import Process, Manager

from .parser import parse_args

# Run parser
parse_args()

# CRITICAL Save the config to a file. This is necessary because the execution graph
# is built as a separate process to keep the memory footprint low. The most
# straightforward way to communicate with the child process is via the filesystem.
config_file = config.execution.work_dir / ".mriqc.toml"
config.to_filename(config_file)

# Set up participant level
if "participant" in config.workflow.analysis_level:
    config.loggers.cli.log(
        25,
        f"""
Running MRIQC version {config.environment.version}:
  * BIDS dataset path: {config.execution.bids_dir}.
  * Output folder: {config.execution.output_dir}.
  * Analysis levels: {config.workflow.analysis_level}.
""",
    )

    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow

        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
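        # The fragment ends here; the canonical continuation of this
        # Manager/Process pattern (a sketch, not necessarily MRIQC's exact
        # code) starts the child, waits for it, and retrieves the workflow
        # from the managed dict populated by build_workflow:
        p.start()
        p.join()
        mriqc_wf = retval.get("workflow", None)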