Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def _run_interface(self, runtime):
# Pass the input image through unchanged if its voxel sizes already meet
# the requested minimum pixel size; otherwise (below) begin computing a
# resampling matrix. NOTE(review): indentation is flattened and the block
# is truncated after the affine inversion — the resampling code that
# consumes aff_base_inv is not visible in this chunk.
nii = nb.load(self.inputs.in_file)
zooms = nii.header.get_zooms()
# Compare actual voxel sizes against the requested size minus a 0.1
# margin; negative entries mean that dimension is smaller than requested.
size_diff = np.array(zooms[:3]) - (self.inputs.pixel_size - 0.1)
# All three spatial dimensions are large enough (1e-3 tolerance):
# forward the inputs as outputs and stop early.
if np.all(size_diff >= -1e-3):
config.loggers.interface.info("Voxel size is large enough")
self._results["out_file"] = self.inputs.in_file
if isdefined(self.inputs.in_mask):
self._results["out_mask"] = self.inputs.in_mask
return runtime
# At least one dimension is too small — log the offending sizes.
config.loggers.interface.info(
"One or more voxel dimensions (%f, %f, %f) are smaller than "
"the requested voxel size (%f) - diff=(%f, %f, %f)",
zooms[0],
zooms[1],
zooms[2],
self.inputs.pixel_size,
size_diff[0],
size_diff[1],
size_diff[2],
)
# Figure out new matrix
# 1) Get base affine
aff_base = nii.header.get_base_affine()
aff_base_inv = np.linalg.inv(aff_base)
.. math::
\text{SNR} = \frac{\mu_F}{\sqrt{\frac{2}{4-\pi}}\,\sigma_\text{air}}.
:param float mu_fg: mean of foreground.
:param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).
:return: the computed SNR for the foreground segmentation
"""
# NOTE(review): the opening of this docstring and the enclosing `def`
# line are missing from this chunk — presumably a Dietrich-style SNR
# estimator, given DIETRICH_FACTOR below; confirm against full source.
# Guard against a near-zero background sigma, which would inflate the
# ratio: warn and bump sigma by 1.0 (a heuristic floor, not a rescale).
if sigma_air < 1.0:
from .. import config
config.loggers.interface.warning(
f"SNRd - background sigma is too small ({sigma_air})"
)
sigma_air += 1.0
# DIETRICH_FACTOR is defined elsewhere in the module; it corrects for
# the non-Gaussian background noise distribution in magnitude images.
return float(DIETRICH_FACTOR * mu_fg / sigma_air)
)
# NOTE(review): fragment starts mid-expression — the call closed by the
# `)` above and the definitions of x_df/nan_labels are not visible here.
# Drop samples previously identified as having invalid (NaN) labels.
x_df = x_df.drop(nan_labels)
# Print out some info
nsamples = len(x_df)
config.loggers.interface.info(
f'Created dataset X="{feat_file}", Y="{label_file}" (N={nsamples} valid samples)'
)
# Inform about ratings distribution
# Collect the distinct rating values and count occurrences of each, then
# log counts, percentages, and a human-readable legend (two classes is
# presumably accept/exclude; three is exclude/doubtful/accept).
labels = sorted(list(set(x_df[rate_label].values.ravel().tolist())))
ldist = []
for l in labels:
ldist.append(int(np.sum(x_df[rate_label] == l)))
config.loggers.interface.info(
"Ratings distribution: %s (%s, %s)",
"/".join(["%d" % x for x in ldist]),
"/".join(["%.2f%%" % (100 * x / nsamples) for x in ldist]),
"accept/exclude" if len(ldist) == 2 else "exclude/doubtful/accept",
)
return x_df, feat_names
url.netloc,
path=path,
scheme=url.scheme,
port=port,
email=email,
)
try:
self._results["api_id"] = response.json()["_id"]
except (AttributeError, KeyError, ValueError):
# response did not give us an ID
errmsg = (
"QC metrics upload failed to create an ID for the record "
"uplOADED. rEsponse from server follows: {}".format(response.text)
)
config.loggers.interface.warning(errmsg)
if response.status_code == 201:
config.loggers.interface.info("QC metrics successfully uploaded.")
return runtime
errmsg = "QC metrics failed to upload. Status %d: %s" % (
response.status_code,
response.text,
)
config.loggers.interface.warning(errmsg)
if self.inputs.strict:
raise RuntimeError(response.text)
return runtime
if not isdefined(val) or key == "trait_added":
continue
# NOTE(review): fragment starts mid-loop — the `for key, val in ...`
# header and the initialization of root_adds/_out_dict are not visible.
# Keys matching self.expr are deferred (collected in root_adds) and
# merged at the top level below; everything else is stored under a
# normalized name via _process_name.
if not self.expr.match(key) is None:
root_adds.append(key)
continue
key, val = _process_name(key, val)
self._out_dict[key] = val
# Deferred keys must hold dictionaries: merge them into the output
# dict at the root; anything else is discarded with a warning.
for root_key in root_adds:
val = self.inputs._outputs.get(root_key, None)
if isinstance(val, dict):
self._out_dict.update(val)
else:
config.loggers.interface.warning(
'Output "%s" is not a dictionary (value="%s"), '
"discarding output.",
root_key,
str(val),
)
# Fill in the "bids_meta" key
# Collect the BIDS entity components that are actually set on the
# inputs (BIDS_COMP is defined elsewhere in the module).
id_dict = {}
for comp in list(BIDS_COMP.keys()):
comp_val = getattr(self.inputs, comp, None)
if isdefined(comp_val) and comp_val is not None:
id_dict[comp] = comp_val
id_dict["modality"] = self.inputs.modality
# Fold in any extra user-provided metadata.
if isdefined(self.inputs.metadata) and self.inputs.metadata:
id_dict.update(self.inputs.metadata)
# Hash fields that may contain personal information
# NOTE(review): fragment starts mid-function — the construction of `data`
# and the parameters (path, scheme, loc, port, modality, email) are not
# visible in this chunk.
data["bids_meta"] = _hashfields(data["bids_meta"])
if email:
data["provenance"]["email"] = email
# Normalize the URL path: ensure exactly one trailing slash and no
# leading slash, so the format string below composes cleanly.
if path and not path.endswith("/"):
path += "/"
if path.startswith("/"):
path = path[1:]
# SECRET_KEY is defined elsewhere; it is sent as the Authorization header.
headers = {"Authorization": SECRET_KEY, "Content-Type": "application/json"}
webapi_url = "{}://{}:{}/{}{}".format(scheme, loc, port, path, modality)
config.loggers.interface.info("MRIQC Web API: submitting to <%s>", webapi_url)
try:
# if the modality is bold, call "bold" endpoint
response = requests.post(webapi_url, headers=headers, data=dumps(data))
except requests.ConnectionError as err:
errmsg = (
"QC metrics failed to upload due to connection error shown below:\n%s" % err
)
# On connection failure, return a Bunch mimicking a response object
# (status_code/text) so callers can handle it uniformly.
return Bunch(status_code=1, text=errmsg)
return response
y_df = y_df[y_df["bids_ids"].isin(list(x_df.bids_ids.values.ravel()))]
# NOTE(review): fragment truncated at both ends — construction of
# x_df/y_df/bids_comps_x is not visible, and the code is cut off after
# the ratings-count loop below.
# Drop indexing column
del x_df["bids_ids"]
del y_df["bids_ids"]
# Merge Y dataframe into X
# Left join keeps every feature row; rows without a matching label end
# up with NaN in rate_label and are dropped below.
x_df = pd.merge(x_df, y_df, on=bids_comps_x, how="left")
if merged_name is not None:
x_df.to_csv(merged_name, index=False)
# Drop samples with invalid rating
nan_labels = x_df[x_df[rate_label].isnull()].index.ravel().tolist()
if nan_labels:
config.loggers.interface.info(
f"Dropping {len(nan_labels)} samples for having non-numerical labels,"
)
x_df = x_df.drop(nan_labels)
# Print out some info
nsamples = len(x_df)
config.loggers.interface.info(
f'Created dataset X="{feat_file}", Y="{label_file}" (N={nsamples} valid samples)'
)
# Inform about ratings distribution
labels = sorted(list(set(x_df[rate_label].values.ravel().tolist())))
ldist = []
for l in labels:
ldist.append(int(np.sum(x_df[rate_label] == l)))
}
# NOTE(review): fragment starts with a stray closing brace — the dict it
# closes and the construction of `output` are not visible in this chunk.
# If no background ("bg") statistics were computed, synthesize an entry:
# zeros for location stats, and aggregate spread/count across all the
# labels already present in `output` (stdv/mad combined in quadrature).
if "bg" not in output:
output["bg"] = {
"mean": 0.0,
"median": 0.0,
"p95": 0.0,
"p05": 0.0,
"k": 0.0,
"stdv": sqrt(sum(val["stdv"] ** 2 for _, val in list(output.items()))),
"mad": sqrt(sum(val["mad"] ** 2 for _, val in list(output.items()))),
"n": sum(val["n"] for _, val in list(output.items())),
}
# A zero MAD with a non-trivial stdv indicates a degenerate background
# estimate; fall back to stdv scaled by DIETRICH_FACTOR (defined
# elsewhere in the module).
if "bg" in output and output["bg"]["mad"] == 0.0 and output["bg"]["stdv"] > 1.0:
config.loggers.interface.warning(
"estimated MAD in the background was too small (MAD=%f)",
output["bg"]["mad"],
)
output["bg"]["mad"] = output["bg"]["stdv"] / DIETRICH_FACTOR
return output
def _run_interface(self, runtime):
# Conform the input image: squeeze a singleton 4th dimension, optionally
# reorient to the closest canonical (RAS) orientation, and optionally
# normalize suspicious integer datatypes to unsigned equivalents.
# NOTE(review): block is truncated mid-elif chain at the end of this
# chunk — the remaining datatype cases and the dtype application are not
# visible here.
# Squeeze 4th dimension if possible (#660)
nii = nb.squeeze_image(nb.load(self.inputs.in_file))
hdr = nii.header.copy()
if self.inputs.check_ras:
nii = nb.as_closest_canonical(nii)
if self.inputs.check_dtype:
# NOTE(review): `changed` is set unconditionally before any dtype
# comparison — verify its consumer in the full source.
changed = True
# NIfTI on-disk datatype code from the header (e.g. 1=binary,
# 2=uint8, 4=int16, 256=int8 — per the NIfTI-1 standard).
datatype = int(hdr["datatype"])
if datatype == 1:
config.loggers.interface.warning(
'Input image %s has a suspicious data type "%s"',
self.inputs.in_file,
hdr.get_data_dtype(),
)
# signed char and bool to uint8
if datatype == 1 or datatype == 2 or datatype == 256:
dtype = np.uint8
# int16 to uint16
elif datatype == 4:
dtype = np.uint16
# Signed long, long long, etc to uint32
elif datatype == 8 or datatype == 1024 or datatype == 1280:
dtype = np.uint32