# (Scraper boilerplate, not part of the original source:) Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment starts mid-function — the enclosing `try:` / `if`
# and the `def` line precede the visible extract, and original indentation
# has been lost. Code below is byte-identical to the extract.
# Persist the assembled PCA state; an unpicklable entry is tolerated.
pickle.dump(pcastate, handle)
except TypeError:
LGR.warning('Could not save PCA solution')
else: # if loading existing state
# Unpack a previously saved PCA solution. `pcastate` is presumably loaded
# from the pickle file earlier (not visible here) — confirm in full source.
voxel_comp_weights = pcastate['voxel_comp_weights']
varex = pcastate['varex']
comp_ts = pcastate['comp_ts']
ct_df = pcastate['comptable']
# comp_ts rows are components (indexed comp_ts[i_comp, :] below), so the
# transpose writes a time-by-components mixing matrix.
np.savetxt('mepca_mix.1D', comp_ts.T)
# write component maps to 4D image
comp_maps = np.zeros((OCcatd.shape[0], comp_ts.shape[0]))
for i_comp in range(comp_ts.shape[0]):
# Column vector of one component's time series for the spatial fit.
temp_comp_ts = comp_ts[i_comp, :][:, None]
comp_map = utils.unmask(model.computefeats2(OCcatd, temp_comp_ts, mask), mask)
comp_maps[:, i_comp] = np.squeeze(comp_map)
io.filewrite(comp_maps, 'mepca_OC_components.nii', ref_img)
# Add new columns to comptable for classification
ct_df['classification'] = 'accepted'
ct_df['rationale'] = ''
# Select components using decision tree
if method == 'kundu':
ct_df = kundu_tedpca(ct_df, n_echos, kdaw, rdaw, stabilize=False)
elif method == 'kundu-stabilize':
ct_df = kundu_tedpca(ct_df, n_echos, kdaw, rdaw, stabilize=True)
elif method == 'mle':
# 'mle': only a log of the component count is visible in this fragment;
# no reclassification branch appears here.
LGR.info('Selected {0} components with MLE dimensionality '
'detection'.format(ct_df.shape[0]))
Mixing matrix for converting input data to component space, where `C`
is components and `T` is the same as in `data`
mask : (S,) array_like
Boolean mask array
acc : :obj:`list`
List of accepted components used to subset `mmix`
Returns
-------
hikts : (S x T) :obj:`numpy.ndarray`
Time series reconstructed using only components in `acc`
rest : (S x T) :obj:`numpy.ndarray`
Original data with `hikts` removed
"""
# Fit the mixing matrix to de-meaned data to get per-voxel component betas.
cbetas = model.get_lstsq_coeffs(data - data.mean(axis=-1, keepdims=True),
mmix, mask)
# Keep only in-mask rows for the reconstruction.
betas = cbetas[mask]
if len(acc) != 0:
# Reconstruct the data from accepted components only, back in full space.
hikts = utils.unmask(betas[:, acc].dot(mmix.T[acc, :]), mask)
else:
hikts = None
# BUG(review): when `acc` is empty, `hikts` is None and `data - hikts`
# raises TypeError — contradicting the docstring's promise to return
# `rest`. TODO: guard the subtraction (e.g. return (None, data)).
return hikts, data - hikts
# NOTE(review): near-duplicate of an earlier fragment in this extract (this
# revision names the component table `comptable` instead of `ct_df`) —
# likely two scraped revisions of the same function. Fragment starts
# mid-function; the enclosing `try:` and `def` are not visible.
pickle.dump(pcastate, handle)
except TypeError:
LGR.warning('Could not save PCA solution')
else: # if loading existing state
# Unpack a previously saved PCA solution (load site not visible here).
voxel_comp_weights = pcastate['voxel_comp_weights']
varex = pcastate['varex']
comp_ts = pcastate['comp_ts']
comptable = pcastate['comptable']
# Write time-by-components mixing matrix.
np.savetxt('mepca_mix.1D', comp_ts.T)
# write component maps to 4D image
comp_maps = np.zeros((OCcatd.shape[0], comp_ts.shape[0]))
for i_comp in range(comp_ts.shape[0]):
temp_comp_ts = comp_ts[i_comp, :][:, None]
comp_map = utils.unmask(model.computefeats2(OCcatd, temp_comp_ts, mask), mask)
comp_maps[:, i_comp] = np.squeeze(comp_map)
io.filewrite(comp_maps, 'mepca_OC_components.nii', ref_img)
# Add new columns to comptable for classification
comptable['classification'] = 'accepted'
comptable['rationale'] = ''
# Select components using decision tree
if method == 'kundu':
comptable = kundu_tedpca(comptable, n_echos, kdaw, rdaw, stabilize=False)
elif method == 'kundu-stabilize':
comptable = kundu_tedpca(comptable, n_echos, kdaw, rdaw, stabilize=True)
elif method == 'mle':
# 'mle': only logs the count here; no pruning visible in this fragment.
LGR.info('Selected {0} components with MLE dimensionality '
'detection'.format(comptable.shape[0]))
====================== =================================================
Filename Content
====================== =================================================
hik_ts_[suffix].nii High-Kappa time series.
midk_ts_[suffix].nii Mid-Kappa time series.
low_ts_[suffix].nii Low-Kappa time series.
dn_ts_[suffix].nii Denoised time series.
====================== =================================================
"""
# NOTE(review): the `def` line and docstring opening precede this extract.
# mask and de-mean data
mdata = data[mask]
dmdata = mdata.T - mdata.T.mean(axis=0)
# get variance explained by retained components
betas = model.get_coeffs(dmdata.T, mmix, mask=None)
# Percent of the de-meaned data's variance captured by the fitted betas.
varexpl = (1 - ((dmdata.T - betas.dot(mmix.T))**2.).sum() /
(dmdata**2.).sum()) * 100
LGR.info('Variance explained by ICA decomposition: '
'{:.02f}%'.format(varexpl))
# create component and de-noised time series and save to files
hikts = betas[:, acc].dot(mmix.T[acc, :])
midkts = betas[:, midk].dot(mmix.T[midk, :])
lowkts = betas[:, rej].dot(mmix.T[rej, :])
# Denoised = original minus rejected (low) and mid-Kappa reconstructions.
dnts = data[mask] - lowkts - midkts
if len(acc) != 0:
fout = filewrite(utils.unmask(hikts, mask),
'hik_ts_{0}'.format(suffix), ref_img)
LGR.info('Writing high-Kappa time series: {}'.format(op.abspath(fout)))
# NOTE(review): fragment is cut off here — the midk/low/denoised writes
# promised by the table above are not visible in this extract.
# NOTE(review): mid-function fragment (indentation lost; `def` not visible).
# actual variance explained (normalized)
varex_norm = varex / varex.sum()
# Compute K and Rho for PCA comps
eimum = np.atleast_2d(eim)
# Reorder axes so the longest dimension comes first, then collapse the
# remaining axis with a product.
eimum = np.transpose(eimum, np.argsort(eimum.shape)[::-1])
eimum = eimum.prod(axis=1)
# Scatter the in-mask values back to full-brain space as a boolean array.
o = np.zeros((mask.shape[0], *eimum.shape[1:]))
o[mask, ...] = eimum
eimum = np.squeeze(o).astype(bool)
# Normalize each component's time series
vTmixN = stats.zscore(comp_ts, axis=1).T
LGR.info('Making initial component selection guess from PCA results')
_, comptable, betasv, v_T = model.fitmodels_direct(
catd, comp_ts.T, eimum, t2s, t2sG, tes, combmode, ref_img,
mmixN=vTmixN, full_sel=False, label='mepca_',
verbose=verbose)
# varex_norm overrides normalized varex computed by fitmodels_direct
comptable['normalized variance explained'] = varex_norm
# Bundle everything needed to restore this PCA solution for pickling.
pcastate = {'method': method,
'voxel_comp_weights': voxel_comp_weights,
'varex': varex,
'comp_ts': comp_ts,
'comptable': comptable}
# Save state
LGR.info('Saving PCA results to: {}'.format(fname))
try:
# NOTE(review): fragment cut mid-`try` — the pickle.dump appears in a
# separate fragment of this extract.
# NOTE(review): near-duplicate of the preceding fragment (this revision
# names the table `ct_df` and z-scores manually instead of stats.zscore) —
# apparently a different scraped revision of the same function.
# actual variance explained (normalized)
varex_norm = varex / varex.sum()
# Compute K and Rho for PCA comps
eimum = np.atleast_2d(eim)
eimum = np.transpose(eimum, np.argsort(eimum.shape)[::-1])
eimum = eimum.prod(axis=1)
# Scatter in-mask values back to full-brain space as a boolean array.
o = np.zeros((mask.shape[0], *eimum.shape[1:]))
o[mask] = eimum
eimum = np.squeeze(o).astype(bool)
# Manual per-column z-scoring of the component time series (same intent as
# stats.zscore in the sibling copy of this fragment).
vTmix = comp_ts.T
vTmixN = ((vTmix.T - vTmix.T.mean(0)) / vTmix.T.std(0)).T
LGR.info('Making initial component selection guess from PCA results')
_, ct_df, betasv, v_T = model.fitmodels_direct(
catd, comp_ts.T, eimum, t2s, t2sG, tes, combmode, ref_img,
mmixN=vTmixN, full_sel=False, label='mepca_',
verbose=verbose)
# varex_norm overrides normalized varex computed by fitmodels_direct
ct_df['normalized variance explained'] = varex_norm
# Bundle the solution for pickling below.
pcastate = {'method': method,
'voxel_comp_weights': voxel_comp_weights,
'varex': varex,
'comp_ts': comp_ts,
'comptable': ct_df}
# Save state
LGR.info('Saving PCA results to: {}'.format(fname))
try:
# NOTE(review): fragment cut mid-`try` — dump not visible here.
:py:func:`tedana.utils.io.write_split_ts`.
betas_OC.nii Full ICA coefficient feature set.
betas_hik_OC.nii Denoised ICA coefficient feature set.
feats_OC2.nii Z-normalized spatial component maps. Generated
by :py:func:`tedana.utils.io.writefeats`.
comp_table.txt Component table. Generated by
:py:func:`tedana.utils.io.writect`.
====================== =================================================
"""
# NOTE(review): `def` line and docstring opening precede this extract.
# Write the optimally-combined time series, then its split/denoised forms.
fout = filewrite(ts, 'ts_OC', ref_img)
LGR.info('Writing optimally-combined time series: {}'.format(op.abspath(fout)))
varexpl = write_split_ts(ts, mmix, mask, acc, rej, midk, ref_img, suffix='OC')
# Full ICA coefficient maps for all components.
ts_B = model.get_coeffs(ts, mmix, mask)
fout = filewrite(ts_B, 'betas_OC', ref_img)
LGR.info('Writing full ICA coefficient feature set: {}'.format(op.abspath(fout)))
if len(acc) != 0:
# Accepted-only coefficient maps and z-normalized spatial maps.
fout = filewrite(ts_B[:, acc], 'betas_hik_OC', ref_img)
LGR.info('Writing denoised ICA coefficient feature set: {}'.format(op.abspath(fout)))
fout = writefeats(split_ts(ts, mmix, mask, acc)[0],
mmix[:, acc], mask, ref_img, suffix='OC2')
LGR.info('Writing Z-normalized spatial component maps: {}'.format(op.abspath(fout)))
# This revision passes `fixed_seed` to writect; a sibling copy in this
# extract omits it — likely two different scraped revisions.
writect(comptable, n_vols, fixed_seed, acc, rej, midk, empty, ctname='comp_table.txt',
varexpl=varexpl)
LGR.info('Writing component table: {}'.format(op.abspath('comp_table.txt')))
====================== =================================================
Filename Content
====================== =================================================
hik_ts_[suffix].nii High-Kappa time series.
midk_ts_[suffix].nii Mid-Kappa time series.
low_ts_[suffix].nii Low-Kappa time series.
dn_ts_[suffix].nii Denoised time series.
====================== =================================================
"""
# NOTE(review): near-duplicate of an earlier fragment (this revision calls
# model.get_lstsq_coeffs and utils.filewrite) — a different scraped revision.
# mask and de-mean data
mdata = data[mask]
dmdata = mdata.T - mdata.T.mean(axis=0)
# get variance explained by retained components
betas = model.get_lstsq_coeffs(dmdata.T, mmix, mask=None)
# Percent of the de-meaned data's variance captured by the fitted betas.
varexpl = (1 - ((dmdata.T - betas.dot(mmix.T))**2.).sum() /
(dmdata**2.).sum()) * 100
LGR.info('Variance explained by ICA decomposition: '
'{:.02f}%'.format(varexpl))
# create component and de-noised time series and save to files
hikts = betas[:, acc].dot(mmix.T[acc, :])
midkts = betas[:, midk].dot(mmix.T[midk, :])
lowkts = betas[:, rej].dot(mmix.T[rej, :])
# Denoised = original minus rejected (low) and mid-Kappa reconstructions.
dnts = data[mask] - lowkts - midkts
if len(acc) != 0:
fout = utils.filewrite(utils.unmask(hikts, mask),
'hik_ts_{0}'.format(suffix), ref_img)
LGR.info('Writing high-Kappa time series: {}'.format(op.abspath(fout)))
# NOTE(review): fragment cut off — the remaining writes are not visible.
:py:func:`tedana.utils.io.write_split_ts`.
betas_OC.nii Full ICA coefficient feature set.
betas_hik_OC.nii Denoised ICA coefficient feature set.
feats_OC2.nii Z-normalized spatial component maps. Generated
by :py:func:`tedana.utils.io.writefeats`.
comp_table.txt Component table. Generated by
:py:func:`tedana.utils.io.writect`.
====================== =================================================
"""
# NOTE(review): near-duplicate of an earlier fragment (this revision uses
# utils.filewrite / model.get_lstsq_coeffs and calls writect WITHOUT the
# `fixed_seed` argument present in the sibling copy) — likely a different
# scraped revision of the same function.
fout = utils.filewrite(ts, 'ts_OC', ref_img)
LGR.info('Writing optimally-combined time series: {}'.format(op.abspath(fout)))
varexpl = write_split_ts(ts, mmix, mask, acc, rej, midk, ref_img, suffix='OC')
# Full ICA coefficient maps for all components.
ts_B = model.get_lstsq_coeffs(ts, mmix, mask)
fout = utils.filewrite(ts_B, 'betas_OC', ref_img)
LGR.info('Writing full ICA coefficient feature set: {}'.format(op.abspath(fout)))
if len(acc) != 0:
# Accepted-only coefficient maps and z-normalized spatial maps.
fout = utils.filewrite(ts_B[:, acc], 'betas_hik_OC', ref_img)
LGR.info('Writing denoised ICA coefficient feature set: {}'.format(op.abspath(fout)))
fout = writefeats(split_ts(ts, mmix, mask, acc)[0],
mmix[:, acc], mask, ref_img, suffix='OC2')
LGR.info('Writing Z-normalized spatial component maps: {}'.format(op.abspath(fout)))
writect(comptable, n_vols, acc, rej, midk, empty, ctname='comp_table.txt',
varexpl=varexpl)
LGR.info('Writing component table: {}'.format(op.abspath('comp_table.txt')))
Mixing matrix for converting input data to component space, where `C`
is components and `T` is the same as in `data`
mask : (S,) array_like
Boolean mask array
acc : :obj:`list`
List of accepted components used to subset `mmix`
Returns
-------
hikts : (S x T) :obj:`numpy.ndarray`
Time series reconstructed using only components in `acc`
rest : (S x T) :obj:`numpy.ndarray`
Original data with `hikts` removed
"""
# NOTE(review): near-duplicate of an earlier fragment (this revision calls
# model.get_coeffs instead of get_lstsq_coeffs).
# Fit the mixing matrix to de-meaned data to get per-voxel component betas.
cbetas = model.get_coeffs(data - data.mean(axis=-1, keepdims=True),
mmix, mask)
betas = cbetas[mask]
if len(acc) != 0:
# Reconstruct from accepted components only, back in full space.
hikts = utils.unmask(betas[:, acc].dot(mmix.T[acc, :]), mask)
else:
hikts = None
# BUG(review): when `acc` is empty, `hikts` is None and `data - hikts`
# raises TypeError — contradicting the documented `rest` return value.
# TODO: guard the subtraction (e.g. return (None, data)).
resid = data - hikts
return hikts, resid