Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment — the enclosing `def` and original indentation are not
# visible here; comments below describe only what these lines demonstrably do.
# Pair each permuted coordinate set with its study DataFrame for the worker.
params = zip(iter_ijks, iter_dfs)
# Serial path for a single core; otherwise fan out over a multiprocessing pool.
if n_cores == 1:
perm_results = []
for pp in tqdm(params, total=n_iters):
perm_results.append(self._run_fwe_permutation(pp))
else:
with mp.Pool(n_cores) as p:
perm_results = list(tqdm(p.imap(self._run_fwe_permutation, params), total=n_iters))
# Presumably each permutation returns a single max statistic — confirm worker.
perm_max_values = perm_results
# Voxel-level FWE
# Convert each observed value to -log(p) against the permutation max-stat null.
vfwe_map = of_values.copy()
for i_vox, val in enumerate(of_values):
vfwe_map[i_vox] = -np.log(null_to_p(val, perm_max_values, 'upper'))
# p == 0 yields inf; cap at the most extreme representable -log(p).
vfwe_map[np.isinf(vfwe_map)] = -np.log(np.finfo(float).eps)
images = {'logp_level-voxel': vfwe_map}
return images
# NOTE(review): fragment — this `else:` belongs to an `if n_cores == 1:` branch
# outside this view; indentation has been stripped from the file.
else:
with mp.Pool(n_cores) as p:
perm_results = list(tqdm(p.imap(self._run_fwe_permutation, params),
total=n_iters))
# Each permutation yields a (max voxel value, max cluster size) pair, which is
# split into the two stored null distributions.
(self.null_distributions['fwe_level-voxel'],
self.null_distributions['fwe_level-cluster']) = zip(*perm_results)
# Cluster-level FWE
# Label contiguous suprathreshold clusters (connectivity given by `conn`).
vthresh_z_map = unmask(vthresh_z_values, self.mask).get_data()
labeled_matrix, n_clusters = ndimage.measurements.label(vthresh_z_map, conn)
logp_cfwe_map = np.zeros(self.mask.shape)
# Assign each cluster a -log(p) based on its size vs. the cluster-size null.
for i_clust in range(1, n_clusters + 1):
clust_size = np.sum(labeled_matrix == i_clust)
clust_idx = np.where(labeled_matrix == i_clust)
logp_cfwe_map[clust_idx] = -np.log(null_to_p(
clust_size, self.null_distributions['fwe_level-cluster'], 'upper'))
# Cap inf (p == 0) at -log(eps), then flatten back to masked 1D array.
logp_cfwe_map[np.isinf(logp_cfwe_map)] = -np.log(np.finfo(float).eps)
logp_cfwe_map = apply_mask(nib.Nifti1Image(logp_cfwe_map, self.mask.affine),
self.mask)
# Voxel-level FWE
# Per-voxel p-values against the max-statistic null distribution.
p_fwe_values = np.zeros(ale_values.shape)
for voxel in range(ale_values.shape[0]):
p_fwe_values[voxel] = null_to_p(
ale_values[voxel], self.null_distributions['fwe_level-voxel'],
tail='upper')
z_fwe_values = p_to_z(p_fwe_values, tail='one')
logp_vfwe_values = -np.log(p_fwe_values)
logp_vfwe_values[np.isinf(logp_vfwe_values)] = -np.log(np.finfo(float).eps)
# NOTE(review): fragment of a permutation loop for a group-subtraction analysis;
# the loop header (`for i_iter ...`) is outside this view.
# Randomly reassign experiments to the two groups by shuffling indices.
np.random.shuffle(id_idx)
# ALE-style union: 1 - prod(1 - MA) over the first n_grp1 shuffled experiments.
iter_grp1_ale_values = np.ones(np.sum(grp1_voxel))
for j_exp in id_idx[:n_grp1]:
iter_grp1_ale_values *= (1. - red_ma_arr[j_exp, :])
iter_grp1_ale_values = 1 - iter_grp1_ale_values
# NOTE(review): grp1_voxel is also used to size the group-2 array — both groups
# appear to be evaluated over the same (group-1 significant) voxel set; confirm.
iter_grp2_ale_values = np.ones(np.sum(grp1_voxel))
for j_exp in id_idx[n_grp1:]:
iter_grp2_ale_values *= (1. - red_ma_arr[j_exp, :])
iter_grp2_ale_values = 1 - iter_grp2_ale_values
# Store the permuted group difference for this iteration's null distribution.
iter_diff_values[i_iter, :] = iter_grp1_ale_values - iter_grp2_ale_values
# NOTE(review): fragment — converts observed group differences to p/z using the
# voxel-wise permutation null built above (enclosing scope not visible).
for voxel in range(np.sum(grp1_voxel)):
# TODO: Check that upper is appropriate
grp1_p_arr[voxel] = null_to_p(diff_ale_values[voxel],
iter_diff_values[:, voxel],
tail='upper')
grp1_z_arr = p_to_z(grp1_p_arr, tail='one')
# Unmask
# Scatter group-1 z-values back into a full-length map; non-tested voxels NaN.
grp1_z_map = np.zeros(image1.shape[0])
grp1_z_map[:] = np.nan
grp1_z_map[grp1_voxel] = grp1_z_arr
# B > A contrast
# Initialize group-2 outputs; p defaults to 1 (non-significant) and z to NaN.
grp2_p_arr = np.ones(np.sum(grp2_voxel))
grp2_z_map = np.zeros(image2.shape[0])
grp2_z_map[:] = np.nan
if np.sum(grp2_voxel) > 0:
# Get MA values for second sample only for voxels significant in
# second sample's meta-analysis.
diff_ale_values = grp2_ale_values - grp1_ale_values
# NOTE(review): fragment — near-duplicate of the cluster/voxel FWE logic earlier
# in this file; likely from a second method of the same class. Indentation
# stripped; enclosing `def` not visible.
# Cluster-level FWE: label suprathreshold clusters and p-value their sizes.
vthresh_z_map = unmask(vthresh_z_values, self.mask).get_data()
labeled_matrix, n_clusters = ndimage.measurements.label(vthresh_z_map, conn)
logp_cfwe_map = np.zeros(self.mask.shape)
for i_clust in range(1, n_clusters + 1):
clust_size = np.sum(labeled_matrix == i_clust)
clust_idx = np.where(labeled_matrix == i_clust)
logp_cfwe_map[clust_idx] = -np.log(null_to_p(
clust_size, self.null_distributions['fwe_level-cluster'], 'upper'))
# Cap inf (p == 0) at -log(eps); flatten back into masked 1D space.
logp_cfwe_map[np.isinf(logp_cfwe_map)] = -np.log(np.finfo(float).eps)
logp_cfwe_map = apply_mask(nib.Nifti1Image(logp_cfwe_map, self.mask.affine),
self.mask)
# Voxel-level FWE
p_fwe_values = np.zeros(ale_values.shape)
for voxel in range(ale_values.shape[0]):
p_fwe_values[voxel] = null_to_p(
ale_values[voxel], self.null_distributions['fwe_level-voxel'],
tail='upper')
z_fwe_values = p_to_z(p_fwe_values, tail='one')
logp_vfwe_values = -np.log(p_fwe_values)
logp_vfwe_values[np.isinf(logp_vfwe_values)] = -np.log(np.finfo(float).eps)
# Write out unthresholded value images
images = {
'z_vthresh': vthresh_z_values,
'logp_level-voxel': logp_vfwe_values,
'z_level-voxel': z_fwe_values,
'logp_level-cluster': logp_cfwe_map,
}
return images
# NOTE(review): fragment — chi-square FWE correction (consistency pAgF /
# specificity pFgA analyses). Enclosing `def` outside this view.
# Build per-permutation argument tuples for the two datasets.
params = zip(iter_dfs1, iter_dfs2, iter_ijks1, iter_ijks2)
if n_cores == 1:
perm_results = []
for pp in tqdm(params, total=n_iters):
perm_results.append(self._run_fwe_permutation(pp))
else:
with mp.Pool(n_cores) as p:
perm_results = list(tqdm(p.imap(self._run_fwe_permutation, params), total=n_iters))
# Split per-permutation results into the two max-chi2 null distributions.
pAgF_null_chi2_dist, pFgA_null_chi2_dist = zip(*perm_results)
# pAgF_FWE
pAgF_null_chi2_dist = np.squeeze(pAgF_null_chi2_dist)
pAgF_p_FWE = np.empty_like(pAgF_chi2_vals).astype(float)
# NOTE(review): loop bound uses pFgA_chi2_vals.shape while filling pAgF values —
# presumably the two arrays share a shape, but verify.
for voxel in range(pFgA_chi2_vals.shape[0]):
pAgF_p_FWE[voxel] = null_to_p(pAgF_chi2_vals[voxel],
pAgF_null_chi2_dist,
tail='upper')
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# 0 or 1. Prevents inf z-values.
pAgF_p_FWE[pAgF_p_FWE < eps] = eps
pAgF_p_FWE[pAgF_p_FWE > (1. - eps)] = 1. - eps
pAgF_z_FWE = p_to_z(pAgF_p_FWE, tail='two') * pAgF_sign
# pFgA_FWE
pFgA_null_chi2_dist = np.squeeze(pFgA_null_chi2_dist)
pFgA_p_FWE = np.empty_like(pFgA_chi2_vals).astype(float)
for voxel in range(pFgA_chi2_vals.shape[0]):
pFgA_p_FWE[voxel] = null_to_p(pFgA_chi2_vals[voxel],
pFgA_null_chi2_dist,
tail='upper')
# NOTE(review): the comment on the next line is truncated and the pFgA
# crop/z-conversion lines that should follow appear to be missing from this
# fragment — compare with the parallel pAgF section above.
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# Cluster-level FWE
labeled_matrix, n_clusters = ndimage.measurements.label(vthresh_of_map, conn)
cfwe_map = np.zeros(self.mask.shape)
for i_clust in range(1, n_clusters + 1):
clust_size = np.sum(labeled_matrix == i_clust)
clust_idx = np.where(labeled_matrix == i_clust)
cfwe_map[clust_idx] = -np.log(null_to_p(
clust_size, perm_clust_sizes, 'upper'))
cfwe_map[np.isinf(cfwe_map)] = -np.log(np.finfo(float).eps)
cfwe_map = apply_mask(nib.Nifti1Image(cfwe_map, self.mask.affine),
self.mask)
# Voxel-level FWE
vfwe_map = apply_mask(of_map, self.mask)
for i_vox, val in enumerate(vfwe_map):
vfwe_map[i_vox] = -np.log(null_to_p(val, perm_max_values, 'upper'))
vfwe_map[np.isinf(vfwe_map)] = -np.log(np.finfo(float).eps)
vthresh_of_map = apply_mask(nib.Nifti1Image(vthresh_of_map,
of_map.affine),
self.mask)
images = {'vthresh': vthresh_of_map,
'logp_level-cluster': cfwe_map,
'logp_level-voxel': vfwe_map}
return images
# NOTE(review): fragment — starts mid-branch; the `if n_cores == 1:` header and
# `perm_results = []` initialization preceding this loop are outside this view.
# This repeats the cluster/voxel FWE logic of the previous fragment.
for pp in tqdm(params, total=n_iters):
perm_results.append(self._run_fwe_permutation(pp))
else:
with mp.Pool(n_cores) as p:
perm_results = list(tqdm(p.imap(self._run_fwe_permutation, params),
total=n_iters))
# Split per-permutation (max value, max cluster size) pairs into two nulls.
perm_max_values, perm_clust_sizes = zip(*perm_results)
# Cluster-level FWE
labeled_matrix, n_clusters = ndimage.measurements.label(vthresh_of_map, conn)
cfwe_map = np.zeros(self.mask.shape)
for i_clust in range(1, n_clusters + 1):
clust_size = np.sum(labeled_matrix == i_clust)
clust_idx = np.where(labeled_matrix == i_clust)
cfwe_map[clust_idx] = -np.log(null_to_p(
clust_size, perm_clust_sizes, 'upper'))
# Cap inf (p == 0) at -log(eps); flatten back into masked 1D space.
cfwe_map[np.isinf(cfwe_map)] = -np.log(np.finfo(float).eps)
cfwe_map = apply_mask(nib.Nifti1Image(cfwe_map, self.mask.affine),
self.mask)
# Voxel-level FWE
vfwe_map = apply_mask(of_map, self.mask)
for i_vox, val in enumerate(vfwe_map):
vfwe_map[i_vox] = -np.log(null_to_p(val, perm_max_values, 'upper'))
vfwe_map[np.isinf(vfwe_map)] = -np.log(np.finfo(float).eps)
vthresh_of_map = apply_mask(nib.Nifti1Image(vthresh_of_map,
of_map.affine),
self.mask)
images = {'vthresh': vthresh_of_map,
'logp_level-cluster': cfwe_map,
'logp_level-voxel': vfwe_map}
# NOTE(review): fragment — empirical-null branch of an image-based one-sample
# test (t-test on z-maps with sign-flip permutations). Enclosing `def`,
# `t_map`, `p_map`, `iter_t_maps`, `k`, `sign`, and `null` are outside view.
# Proportion of positive (nonzero) z-values across all maps; used to bias the
# random sign flips so the null respects the observed sign balance.
data_signs = np.sign(z_maps[z_maps != 0])
data_signs[data_signs < 0] = 0
posprop = np.mean(data_signs)
for i in range(n_iters):
# Randomly flip signs of z-maps based on proportion of z-value
# signs across all maps.
iter_z_maps = np.copy(z_maps)
signs = np.random.choice(a=2, size=k, p=[1 - posprop, posprop])
signs[signs == 0] = -1
iter_z_maps *= signs[:, None]
iter_t_maps[i, :], _ = stats.ttest_1samp(iter_z_maps,
popmean=0, axis=0)
# NaN t-values (e.g. zero-variance voxels) are treated as null t == 0.
iter_t_maps[np.isnan(iter_t_maps)] = 0
# Voxel-wise empirical p-value of the observed t against the permuted t's.
for voxel in range(iter_t_maps.shape[1]):
p_map[voxel] = null_to_p(t_map[voxel], iter_t_maps[:, voxel])
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# 0 or 1. Prevents inf z-values.
p_map[p_map < 1e-16] = 1e-16
p_map[p_map > (1. - 1e-16)] = 1. - 1e-16
elif null != 'theoretical':
raise ValueError('Input null must be "theoretical" or "empirical".')
# Convert p to z, preserving signs
z_map = p_to_z(p_map, tail='two') * sign
log_p_map = -np.log10(p_map)
images = {'t': t_map,
'p': p_map,
'z': z_map,
'log_p': log_p_map}
# NOTE(review): fragment — `elif` continuation of an `inference` dispatch whose
# earlier branches are outside this view; duplicates the chi2 FWE logic above.
elif inference == 'ffx':
pAgF_p_FWE = np.empty_like(pAgF_chi2_vals).astype(float)
# NOTE(review): loop bound uses pFgA_chi2_vals.shape while filling pAgF values —
# presumably the arrays share a shape, but verify.
for voxel in range(pFgA_chi2_vals.shape[0]):
pAgF_p_FWE[voxel] = null_to_p(pAgF_chi2_vals[voxel],
pAgF_null_chi2_dist,
tail='upper')
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# 0 or 1. Prevents inf z-values.
pAgF_p_FWE[pAgF_p_FWE < eps] = eps
pAgF_p_FWE[pAgF_p_FWE > (1. - eps)] = 1. - eps
# Two-tailed z, with sign restored from the direction of the original effect.
pAgF_z_FWE = p_to_z(pAgF_p_FWE, tail='two') * pAgF_sign
# pFgA_FWE
pFgA_null_chi2_dist = np.squeeze(pFgA_null_chi2_dist)
pFgA_p_FWE = np.empty_like(pFgA_chi2_vals).astype(float)
for voxel in range(pFgA_chi2_vals.shape[0]):
pFgA_p_FWE[voxel] = null_to_p(pFgA_chi2_vals[voxel],
pFgA_null_chi2_dist,
tail='upper')
# Crop p-values of 0 or 1 to nearest values that won't evaluate to
# 0 or 1. Prevents inf z-values.
pFgA_p_FWE[pFgA_p_FWE < eps] = eps
pFgA_p_FWE[pFgA_p_FWE > (1. - eps)] = 1. - eps
pFgA_z_FWE = p_to_z(pFgA_p_FWE, tail='two') * pFgA_sign
# FWE-corrected consistency (P(A|F)) and specificity (P(F|A)) maps.
images = {
'consistency_p_FWE': pAgF_p_FWE,
'consistency_z_FWE': pAgF_z_FWE,
'specificity_p_FWE': pFgA_p_FWE,
'specificity_z_FWE': pFgA_z_FWE,
}
return images