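# Parse the comma-separated list of manually accepted components, if one was provided.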
if manacc is not None:
    manacc = [int(comp) for comp in manacc.split(',')]
if ctab and not mixm:
    LGR.warning('Argument "ctab" requires argument "mixm".')
    ctab = None
elif ctab and (manacc is None):
    LGR.warning('Argument "ctab" requires argument "manacc".')
    ctab = None
elif manacc is not None and not mixm:
    LGR.warning('Argument "manacc" requires argument "mixm".')
    manacc = None
RepLGR.info("TE-dependence analysis was performed on input data.")
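# If no explicit mask is given, compute a brain mask from the first echo;
# otherwise use the user-provided mask.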
if mask is None:
    LGR.info('Computing EPI mask from first echo')
    first_echo_img = io.new_nii_like(ref_img, catd[:, 0, :])
    mask = compute_epi_mask(first_echo_img)
    RepLGR.info("An initial mask was generated from the first echo using "
                "nilearn's compute_epi_mask function.")
else:
    # TODO: add affine check
    LGR.info('Using user-defined mask')
    RepLGR.info("A user-defined mask was applied to the data.")
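# Build the adaptive mask: masksum records, per voxel, how many echoes have
# usable signal, and the analysis mask is restricted accordingly.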
mask, masksum = utils.make_adaptive_mask(catd, mask=mask, getsum=True)
LGR.debug('Retaining {}/{} samples'.format(mask.sum(), n_samp))
io.filewrite(masksum, op.join(out_dir, 'adaptive_mask.nii'), ref_img)
os.chdir(out_dir)
LGR.info('Computing T2* map')
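# Fit a monoexponential decay model across echoes to estimate voxelwise T2*
# and S0 ('limited' and 'full' map variants).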
t2s_limited, s0_limited, t2s_full, s0_full = decay.fit_decay(
    catd, tes, mask, masksum, fittype)  # argument list assumed from the surrounding workflow
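# Dimensionality reduction: choose the data to decompose based on source_tes
# (optimally combined data, all echoes concatenated, or specific echoes).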
if len(source_tes) == 1 and source_tes[0] == -1:  # -1 selects the optimally combined data
    LGR.info('Computing PCA of optimally combined multi-echo data')
    data = data_oc[mask, :][:, np.newaxis, :]
elif len(source_tes) == 1 and source_tes[0] == 0:
    LGR.info('Computing PCA of spatially concatenated multi-echo data')
    data = data_cat[mask, ...]
else:
    LGR.info('Computing PCA of echo #{0}'.format(','.join([str(ee) for ee in source_tes])))
    data = np.stack([data_cat[mask, ee, :] for ee in source_tes - 1], axis=1)
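# Restrict the data to samples that eimask judges usable across echoes
# before decomposition.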
eim = np.squeeze(_utils.eimask(data))
data = np.squeeze(data[eim])
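# Two-stage variance normalization: z-score each voxel's time series, then
# scale the full matrix to zero mean and unit variance.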
data_z = ((data.T - data.T.mean(axis=0)) / data.T.std(axis=0)).T # var normalize ts
data_z = (data_z - data_z.mean()) / data_z.std() # var normalize everything
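# Select the decomposition backend: the mdl/aic/kic criteria (ma_pca) operate
# on images, while 'mle', the low-memory path, and the default PCA operate on
# the z-scored data matrix.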
if algorithm in ['mdl', 'aic', 'kic']:
    data_img = io.new_nii_like(
        ref_img, utils.unmask(utils.unmask(data, eim), mask))
    mask_img = io.new_nii_like(ref_img,
                               utils.unmask(eim, mask).astype(int))
    voxel_comp_weights, varex, varex_norm, comp_ts = ma_pca.ma_pca(
        data_img, mask_img, algorithm)
elif algorithm == 'mle':
    voxel_comp_weights, varex, varex_norm, comp_ts = run_mlepca(data_z)
elif low_mem:
    voxel_comp_weights, varex, comp_ts = low_mem_pca(data_z)
    varex_norm = varex / varex.sum()
else:
    ppca = PCA(copy=False, n_components=(n_vols - 1))
    ppca.fit(data_z)
    comp_ts = ppca.components_.T
    varex = ppca.explained_variance_
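    # Per-voxel weights: project the z-scored data onto the component time
    # series and rescale by the inverse of each component's variance.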
    voxel_comp_weights = np.dot(np.dot(data_z, comp_ts),
                                np.diag(1. / varex))
    varex_norm = varex / varex.sum()
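# Component-selection metrics: for the Kundu decision trees (kundu_v2/v3),
# each component's F-, Z-, and ranked signal-change maps are cluster-extent
# thresholded and binarized below.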
comptable.index.name = 'component'

# Generate clustering criteria for component selection
if algorithm in ['kundu_v2', 'kundu_v3']:
    Z_clmaps = np.zeros([n_voxels, n_components], bool)
    F_R2_clmaps = np.zeros([n_voxels, n_components], bool)
    F_S0_clmaps = np.zeros([n_voxels, n_components], bool)
    Br_R2_clmaps = np.zeros([n_voxels, n_components], bool)
    Br_S0_clmaps = np.zeros([n_voxels, n_components], bool)

    LGR.info('Performing spatial clustering of components')
    csize = np.max([int(n_voxels * 0.0005) + 5, 20])
    LGR.debug('Using minimum cluster size: {}'.format(csize))
    for i_comp in range(n_components):
        # Cluster-extent threshold and binarize F-maps
        ccimg = io.new_nii_like(
            ref_img,
            np.squeeze(utils.unmask(F_R2_maps[:, i_comp], mask)))
        F_R2_clmaps[:, i_comp] = utils.threshold_map(
            ccimg, min_cluster_size=csize, threshold=fmin, mask=mask,
            binarize=True)
        countsigFR2 = F_R2_clmaps[:, i_comp].sum()

        ccimg = io.new_nii_like(
            ref_img,
            np.squeeze(utils.unmask(F_S0_maps[:, i_comp], mask)))
        F_S0_clmaps[:, i_comp] = utils.threshold_map(
            ccimg, min_cluster_size=csize, threshold=fmin, mask=mask,
            binarize=True)
        countsigFS0 = F_S0_clmaps[:, i_comp].sum()

        # Cluster-extent threshold and binarize Z-maps with CDT of p < 0.05
        ccimg = io.new_nii_like(
            ref_img,
            np.squeeze(utils.unmask(Z_maps[:, i_comp], mask)))
        Z_clmaps[:, i_comp] = utils.threshold_map(
            ccimg, min_cluster_size=csize, threshold=1.95, mask=mask,
            binarize=True)

        # Cluster-extent threshold and binarize ranked signal-change map
        ccimg = io.new_nii_like(
            ref_img,
            utils.unmask(stats.rankdata(tsoc_Babs[:, i_comp]), mask))
        Br_R2_clmaps[:, i_comp] = utils.threshold_map(
            ccimg, min_cluster_size=csize,
            threshold=(max(tsoc_Babs.shape) - countsigFR2), mask=mask,
            binarize=True)
        Br_S0_clmaps[:, i_comp] = utils.threshold_map(
            ccimg, min_cluster_size=csize,
            threshold=(max(tsoc_Babs.shape) - countsigFS0), mask=mask,
            binarize=True)
    del ccimg, tsoc_Babs

if algorithm == 'kundu_v2':
    # WTS, tsoc_B, PSC, and F_S0_maps are not used by Kundu v2.5
    selvars = ['Z_maps', 'F_R2_maps',
               'Z_clmaps', 'F_R2_clmaps', 'F_S0_clmaps',
               'Br_R2_clmaps', 'Br_S0_clmaps']