    kdaw : :obj:`float`
        Kappa dimensionality augmentation weight. Must be a non-negative float,
        or -1 (a special value).
    rdaw : :obj:`float`
        Rho dimensionality augmentation weight. Must be a non-negative float,
        or -1 (a special value). Default is 1.
stabilize : :obj:`bool`, optional
Whether to stabilize convergence by reducing dimensionality, for low
quality data. Default is False.

    Returns
    -------
comptable : :obj:`pandas.DataFrame`
Component table with components classified as 'accepted', 'rejected',
or 'ignored'.
"""
LGR.info('Performing PCA component selection with Kundu decision tree')
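
    # All components start as accepted with an empty rationale; the checks
    # below flip low-quality components to 'rejected' and append rationale
    # codes (e.g., 'P001;').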
comptable['classification'] = 'accepted'
comptable['rationale'] = ''
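
    # Elbow of the scree curve: the normalized variance explained value
    # beyond which additional components contribute little extra variance.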
eigenvalue_elbow = getelbow(comptable['normalized variance explained'],
return_val=True)
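
    # Derive a minimum variance-explained cutoff from the first differences
    # of the normalized variance explained curve, using its latter half
    # (the flat tail of the scree plot).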
diff_varex_norm = np.abs(np.diff(comptable['normalized variance explained']))
lower_diff_varex_norm = diff_varex_norm[(len(diff_varex_norm) // 2):]
varex_norm_thr = np.mean([lower_diff_varex_norm.max(),
diff_varex_norm.min()])
varex_norm_min = comptable['normalized variance explained'][
(len(diff_varex_norm) // 2) +
np.arange(len(lower_diff_varex_norm))[lower_diff_varex_norm >= varex_norm_thr][0] + 1]
varex_norm_cum = np.cumsum(comptable['normalized variance explained'])
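
    # F-statistic significance bounds derived from the number of echoes;
    # fmin/fmid bracket the kappa and rho threshold selection below.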
fmin, fmid, fmax = getfbounds(n_echos)
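
    # kdaw == -1 selects the 'kundu-stabilize' variant: kappa and rho
    # thresholds come from elbows of the values falling between the F-bounds,
    # and stabilization is forced on.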
if int(kdaw) == -1:
lim_idx = utils.andb([comptable['kappa'] < fmid,
comptable['kappa'] > fmin]) == 2
kappa_lim = comptable.loc[lim_idx, 'kappa'].values
        kappa_thr = kappa_lim[getelbow(kappa_lim)]
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
        stabilize = True
        LGR.info('kdaw set to -1. Switching TEDPCA algorithm to '
                 'kundu-stabilize')
    elif int(rdaw) == -1:
        # NOTE: kappa_thr is never assigned in this branch, so the kappa
        # comparison below would raise a NameError if only rdaw is -1.
        lim_idx = utils.andb([comptable['rho'] < fmid, comptable['rho'] > fmin]) == 2
        rho_lim = comptable.loc[lim_idx, 'rho'].values
        rho_thr = rho_lim[getelbow(rho_lim)]
    else:
        # Thresholds are weighted averages of the F-bounds and half the
        # kappa/rho elbow, with kdaw/rdaw weighting the smallest element.
        kappa_thr = np.average(
            sorted([fmin, (getelbow(comptable['kappa'], return_val=True) / 2), fmid]),
            weights=[kdaw, 1, 1])
        rho_thr = np.average(
            sorted([fmin, (getelbow_cons(comptable['rho'], return_val=True) / 2), fmid]),
            weights=[rdaw, 1, 1])

    # Reject if low Kappa, Rho, and variance explained
    is_lowk = comptable['kappa'] <= kappa_thr
    is_lowr = comptable['rho'] <= rho_thr
    is_lowe = comptable['normalized variance explained'] <= eigenvalue_elbow
    is_lowkre = is_lowk & is_lowr & is_lowe
    comptable.loc[is_lowkre, 'classification'] = 'rejected'
    comptable.loc[is_lowkre, 'rationale'] += 'P001;'

    # Reject if low variance explained
    is_lows = comptable['normalized variance explained'] <= varex_norm_min
    comptable.loc[is_lows, 'classification'] = 'rejected'
    comptable.loc[is_lows, 'rationale'] += 'P002;'


# ---------------------------------------------------------------------------
# Second snippet: fragment of a separate, Kundu-style ICA component-selection
# routine. It references names defined earlier in its source function
# (`unclf`, `varex_upper_p`, `all_comps`, `rej`, `clean_dataframe`) that are
# not part of this excerpt.
# ---------------------------------------------------------------------------
    ncls = unclf.copy()
    for i_loop in range(3):
        temp_comptable = comptable.loc[ncls].sort_values(by=['variance explained'],
                                                         ascending=False)
        ncls = temp_comptable.loc[
            temp_comptable['variance explained'].diff() < varex_upper_p].index.values

    # Compute elbows from other elbows
    f05, _, f01 = getfbounds(n_echos)
    kappas_nonsig = comptable.loc[comptable['kappa'] < f01, 'kappa']
    # NOTE: Would an elbow from all Kappa values *ever* be lower than one from
    # a subset of lower values?
    kappa_elbow = np.min((getelbow(kappas_nonsig, return_val=True),
                          getelbow(comptable['kappa'], return_val=True)))
    rho_elbow = np.mean((getelbow(comptable.loc[ncls, 'rho'], return_val=True),
                         getelbow(comptable['rho'], return_val=True),
                         f05))

    # Provisionally accept components based on Kappa and Rho elbows
    acc_prov = ncls[(comptable.loc[ncls, 'kappa'] >= kappa_elbow) &
                    (comptable.loc[ncls, 'rho'] < rho_elbow)]

    # Quit early if no potentially accepted components remain
    if len(acc_prov) == 0:
        LGR.warning('No BOLD-like components detected. Ignoring all remaining '
                    'components.')
        ign = sorted(np.setdiff1d(all_comps, rej))
        comptable.loc[ign, 'classification'] = 'ignored'
        comptable.loc[ign, 'rationale'] += 'I006;'

        # Move decision columns to end and return early
        comptable = clean_dataframe(comptable)
        return comptable
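
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original snippets). It assumes the first
# snippet above is tedana's `kundu_tedpca(comptable, n_echos, kdaw, rdaw,
# stabilize)` and that the import path below exists; the toy metric values
# are invented purely to illustrate the expected inputs and outputs (real
# runs have many more components).
# ---------------------------------------------------------------------------
# import pandas as pd
# from tedana.selection import kundu_tedpca  # assumed import path
#
# comptable = pd.DataFrame({
#     'kappa': [85.0, 60.0, 20.0, 12.0],
#     'rho': [14.0, 18.0, 35.0, 42.0],
#     'variance explained': [45.0, 30.0, 15.0, 10.0],
#     'normalized variance explained': [0.45, 0.30, 0.15, 0.10],
# })
# comptable = kundu_tedpca(comptable, n_echos=3, kdaw=10., rdaw=1.)
# print(comptable[['classification', 'rationale']])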