# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment starts mid-function; `tsoc_dm`, `tsoc_B`, `tsoc`, `WTS`,
# `mmix`, `catd`, `mask`, `tes`, `t2s`, `get_coeffs`, `getfbounds` and `utils`
# are all defined above/outside this view — confirm against the full file.
del tsoc_dm  # free the de-meaned copy; not used past this point
# magnitude of the fitted component betas
tsoc_Babs = np.abs(tsoc_B)
# percent signal change: betas scaled by the voxel-wise mean of `tsoc`
PSC = tsoc_B / tsoc.mean(axis=-1, keepdims=True) * 100
# compute skews to determine signs based on unnormalized weights,
# correct mmix & WTS signs based on spatial distribution tails
signs = stats.skew(WTS, axis=0)
signs /= np.abs(signs)  # collapse each skew to +1 or -1
mmix_corrected = mmix * signs
WTS *= signs
PSC *= signs
# total variance of the unnormalized betas and of the normalized weights
totvar = (tsoc_B**2).sum()
totvar_norm = (WTS**2).sum()
# compute Betas and means over TEs for TE-dependence analysis
betas = get_coeffs(utils.unmask(catd, mask),
                   mmix_corrected,
                   np.repeat(mask[:, np.newaxis], len(tes), axis=1))
betas = betas[mask, ...]
n_voxels, n_echos, n_components = betas.shape
# mean over the last axis — presumably time, per the TE-dependence models below;
# TODO(review): confirm `catd` axis order is (samples, echos, time)
mu = catd.mean(axis=-1, dtype=float)
tes = np.reshape(tes, (n_echos, 1))  # echo times as a column vector
fmin, _, _ = getfbounds(n_echos)
# set up Xmats
X1 = mu.T  # Model 1
X2 = np.tile(tes, (1, n_voxels)) * mu.T / t2s.T  # Model 2
# tables for component selection: one entry per component
kappas = np.zeros([n_components])
rhos = np.zeros([n_components])
varex = np.zeros([n_components])
mask : (S,) :obj:`numpy.ndarray`
    Boolean mask array
comptable : (C x X) :obj:`pandas.DataFrame`
Component metric table. One row for each component, with a column for
each metric. Requires at least two columns: "component" and
"classification".
Returns
-------
hikts : (S x T) :obj:`numpy.ndarray`
Time series reconstructed using only components in `acc`
rest : (S x T) :obj:`numpy.ndarray`
Original data with `hikts` removed
"""
# NOTE(review): body fragment; `comptable`, `data`, `mmix`, `mask`,
# `get_coeffs` and `utils` come from the enclosing function above this view.
# indices of components labelled 'accepted' in the component table
acc = comptable[comptable.classification == 'accepted'].index.values
# fit the de-meaned data against the mixing matrix within the mask
cbetas = get_coeffs(data - data.mean(axis=-1, keepdims=True),
                    mmix, mask)
betas = cbetas[mask]
if len(acc) != 0:
    # reconstruct the high-Kappa (accepted-component) time series in full space
    hikts = utils.unmask(betas[:, acc].dot(mmix.T[acc, :]), mask)
else:
    hikts = None
# NOTE(review): when `acc` is empty, `hikts` is None and this subtraction
# raises TypeError — confirm callers guarantee >= 1 accepted component
resid = data - hikts
return hikts, resid
====================== =================================================
hik_ts_[suffix].nii High-Kappa time series.
midk_ts_[suffix].nii Mid-Kappa time series.
low_ts_[suffix].nii Low-Kappa time series.
dn_ts_[suffix].nii Denoised time series.
====================== =================================================
"""
# NOTE(review): body fragment, truncated mid-call at the final `filewrite`;
# `comptable`, `data`, `mask`, `mmix`, `suffix`, `ref_img`, `filewrite`,
# `get_coeffs`, `utils`, `LGR` and `op` are defined outside this view.
# split component indices by classification label
acc = comptable[comptable.classification == 'accepted'].index.values
rej = comptable[comptable.classification == 'rejected'].index.values
# mask and de-mean data
mdata = data[mask]
dmdata = mdata.T - mdata.T.mean(axis=0)
# get variance explained by retained components
betas = get_coeffs(dmdata.T, mmix, mask=None)
varexpl = (1 - ((dmdata.T - betas.dot(mmix.T))**2.).sum() /
           (dmdata**2.).sum()) * 100
LGR.info('Variance explained by ICA decomposition: {:.02f}%'.format(varexpl))
# create component and de-noised time series and save to files
hikts = betas[:, acc].dot(mmix.T[acc, :])    # accepted-component reconstruction
lowkts = betas[:, rej].dot(mmix.T[rej, :])   # rejected-component reconstruction
dnts = data[mask] - lowkts                   # denoised = data minus rejected
if len(acc) != 0:
    fout = filewrite(utils.unmask(hikts, mask),
                     'hik_ts_{0}'.format(suffix), ref_img)
    LGR.info('Writing high-Kappa time series: {}'.format(op.abspath(fout)))
if len(rej) != 0:
    fout = filewrite(utils.unmask(lowkts, mask),
    # NOTE(review): statement truncated here — remainder of the call is missing
# NOTE(review): second copy of the metric-computation fragment (duplicates the
# region near the top of this chunk); `tsoc`, `catd`, `t2s`, `mask`, `mmix`,
# `mmixN`, `computefeats2`, `get_coeffs` and `utils` are defined outside this
# view. Truncated mid-call at the final `get_coeffs`.
# mask everything we can
tsoc = tsoc[mask, :]
catd = catd[mask, ...]
t2s = t2s[mask]
# demean optimal combination
tsoc_dm = tsoc - tsoc.mean(axis=-1, keepdims=True)
# compute un-normalized weight dataset (features)
if mmixN is None:
    mmixN = mmix  # fall back to the supplied mixing matrix
WTS = computefeats2(tsoc, mmixN, mask=None, normalize=False)
# compute PSC dataset - shouldn't have to refit data
tsoc_B = get_coeffs(tsoc_dm, mmix, mask=None)
del tsoc_dm  # free the de-meaned copy; no longer needed
tsoc_Babs = np.abs(tsoc_B)
# percent signal change relative to the voxel-wise mean of `tsoc`
PSC = tsoc_B / tsoc.mean(axis=-1, keepdims=True) * 100
# compute skews to determine signs based on unnormalized weights,
# correct mmix & WTS signs based on spatial distribution tails
signs = stats.skew(WTS, axis=0)
signs /= np.abs(signs)  # collapse each skew to +1 or -1
mmix_corrected = mmix * signs
WTS *= signs
PSC *= signs
totvar = (tsoc_B**2).sum()
totvar_norm = (WTS**2).sum()
# compute Betas and means over TEs for TE-dependence analysis
betas = get_coeffs(utils.unmask(catd, mask),
# NOTE(review): call truncated here — fragment ends mid-statement
dn_ts_OC.nii Denoised time series. Generated by
:py:func:`tedana.utils.io.write_split_ts`.
betas_OC.nii Full ICA coefficient feature set.
betas_hik_OC.nii Denoised ICA coefficient feature set.
feats_OC2.nii Z-normalized spatial component maps. Generated
by :py:func:`tedana.utils.io.writefeats`.
====================== =================================================
"""
# NOTE(review): body fragment; `comptable`, `ts`, `mmix`, `mask`, `ref_img`,
# `filewrite`, `writefeats`, `write_split_ts`, `split_ts`, `get_coeffs`,
# `LGR` and `op` are defined outside this view.
# indices of accepted components
acc = comptable[comptable.classification == 'accepted'].index.values
# write the optimally combined time series
fout = filewrite(ts, 'ts_OC', ref_img)
LGR.info('Writing optimally combined time series: {}'.format(op.abspath(fout)))
# write high/mid/low-Kappa and denoised splits of the OC series
write_split_ts(ts, mmix, mask, comptable, ref_img, suffix='OC')
# full ICA coefficient feature set (all components)
ts_B = get_coeffs(ts, mmix, mask)
fout = filewrite(ts_B, 'betas_OC', ref_img)
LGR.info('Writing full ICA coefficient feature set: {}'.format(op.abspath(fout)))
if len(acc) != 0:
    # denoised feature set restricted to accepted components
    fout = filewrite(ts_B[:, acc], 'betas_hik_OC', ref_img)
    LGR.info('Writing denoised ICA coefficient feature set: {}'.format(op.abspath(fout)))
    # Z-normalized spatial maps from the high-Kappa reconstruction
    fout = writefeats(split_ts(ts, mmix, mask, comptable)[0],
                      mmix[:, acc], mask, ref_img, suffix='OC2')
    LGR.info('Writing Z-normalized spatial component maps: {}'.format(op.abspath(fout)))