# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
varex = ppca.explained_variance_
voxel_comp_weights = np.dot(np.dot(data_z, comp_ts),
np.diag(1. / varex))
varex_norm = varex / varex.sum()
# Compute Kappa and Rho for PCA comps
eimum = np.atleast_2d(eim)
eimum = np.transpose(eimum, np.argsort(eimum.shape)[::-1])
eimum = eimum.prod(axis=1)
o = np.zeros((mask.shape[0], *eimum.shape[1:]))
o[mask, ...] = eimum
eimum = np.squeeze(o).astype(bool)
# Normalize each component's time series
vTmixN = stats.zscore(comp_ts, axis=0)
comptable, _, _, _ = metrics.dependence_metrics(data_cat,
data_oc,
comp_ts,
t2s,
tes,
ref_img,
reindex=False,
mmixN=vTmixN,
algorithm=None,
label='mepca_',
out_dir=out_dir,
verbose=verbose)
# varex_norm from PCA overrides varex_norm from dependence_metrics,
# but we retain the original
comptable['estimated normalized variance explained'] = \
comptable['normalized variance explained']
kdaw=10., rdaw=1.,
out_dir=out_dir,
verbose=verbose,
low_mem=low_mem)
mmix_orig = decomposition.tedica(dd, n_components, fixed_seed,
maxit, maxrestart)
if verbose and (source_tes == -1):
io.filewrite(utils.unmask(dd, mask),
op.join(out_dir, 'ts_OC_whitened.nii'), ref_img)
LGR.info('Making second component selection guess from ICA results')
# Estimate betas and compute selection metrics for mixing matrix
# generated from dimensionally reduced data using full data (i.e., data
# with thermal noise)
comptable, metric_maps, betas, mmix = metrics.dependence_metrics(
catd, data_oc, mmix_orig, t2s_limited, tes,
ref_img, reindex=True, label='meica_', out_dir=out_dir,
algorithm='kundu_v2', verbose=verbose)
comp_names = [io.add_decomp_prefix(comp, prefix='ica', max_value=comptable.index.max())
for comp in comptable.index.values]
mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
mixing_df.to_csv('ica_mixing.tsv', sep='\t', index=False)
betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask), mask)
io.filewrite(betas_oc,
op.join(out_dir, 'ica_components.nii.gz'),
ref_img)
comptable = metrics.kundu_metrics(comptable, metric_maps)
comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols)
else:
LGR.info('Using supplied mixing matrix from ICA')
algorithm='kundu_v2', verbose=verbose)
comp_names = [io.add_decomp_prefix(comp, prefix='ica', max_value=comptable.index.max())
for comp in comptable.index.values]
mixing_df = pd.DataFrame(data=mmix, columns=comp_names)
mixing_df.to_csv('ica_mixing.tsv', sep='\t', index=False)
betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask), mask)
io.filewrite(betas_oc,
op.join(out_dir, 'ica_components.nii.gz'),
ref_img)
comptable = metrics.kundu_metrics(comptable, metric_maps)
comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols)
else:
LGR.info('Using supplied mixing matrix from ICA')
mmix_orig = pd.read_table(op.join(out_dir, 'ica_mixing.tsv')).values
comptable, metric_maps, betas, mmix = metrics.dependence_metrics(
catd, data_oc, mmix_orig, t2s_limited, tes,
ref_img, label='meica_', out_dir=out_dir,
algorithm='kundu_v2', verbose=verbose)
betas_oc = utils.unmask(computefeats2(data_oc, mmix, mask), mask)
io.filewrite(betas_oc,
op.join(out_dir, 'ica_components.nii.gz'),
ref_img)
if ctab is None:
comptable = metrics.kundu_metrics(comptable, metric_maps)
comptable = selection.kundu_selection_v2(comptable, n_echos, n_vols)
else:
comptable = pd.read_csv(ctab, sep='\t', index_col='component')
comptable = selection.manual_selection(comptable, acc=manacc)
# Save decomposition