from brainspace.null_models import SpinRandomization
n_spins = 2000
sp = SpinRandomization(n_rep=n_spins, random_state=0)
sp.fit(centroids_lh, points_rh=centroids_rh)
gradient_spins_lh, gradient_spins_rh = sp.randomize(gradient[:200],
                                                    x_rh=gradient[200:])
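###############################################################################
# Each output of `randomize` stacks one permutation per row; a quick shape
# check (a sketch, assuming 200 parcels per hemisphere as in the slicing
# above):
print(gradient_spins_lh.shape)  # expected: (n_spins, 200)
print(gradient_spins_rh.shape)  # expected: (n_spins, 200)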
###############################################################################
# Let's check the first 3 spin permutations
# First, append randomized data to spheres
for i in range(3):
    array_name = 'gradient_spins{i}'.format(i=i)
    gs2 = map_to_labels(gradient_spins_lh[i], parcellation_lh, mask=mask_lh,
                        fill=np.nan)
    sphere_lh.append_array(gs2, name=array_name, at='p')
    gs2 = map_to_labels(gradient_spins_rh[i], parcellation_rh, mask=mask_rh,
                        fill=np.nan)
    sphere_rh.append_array(gs2, name=array_name, at='p')
# and plot the original data together with the first 3 randomizations
array_names = ['gradient', 'gradient_spins0', 'gradient_spins1',
               'gradient_spins2']
plot_hemispheres(sphere_lh, sphere_rh, array_name=array_names,
                 interactive=False, embed_nb=True, size=(800, 800),
                 cmap_name='viridis_r')
name_gradients = [None] * n_parcellations
for i, cm in enumerate(conn_matrices):
    # We ask for a single gradient
    gm = GradientMaps(n_components=1, approach='dm', kernel='normalized_angle',
                      random_state=0)
    # fit to the connectivity matrix
    gm.fit(cm)
    # append the gradient to the surfaces
    array_name = 'grad0_Schaefer{0}'.format(list_parcels[i])
    name_gradients[i] = array_name
    grad = gm.gradients_[:, 0]
    # map the gradient to the parcels
    grad = map_to_labels(grad, labelings[i], mask=labelings[i] != 0,
                         fill=np.nan)
    # append to hemispheres
    print("Appending '%s'" % array_name)
    surf_lh.append_array(grad[:n_pts_lh], name=array_name, at='p')
    surf_rh.append_array(grad[n_pts_lh:], name=array_name, at='p')
###############################################################################
# Finally, we plot the first gradient for each parcellation as follows:
plot_hemispheres(surf_lh, surf_rh, array_name=name_gradients, interactive=False,
                 embed_nb=True, size=(800, 800), cmap_name='viridis')
from brainspace.gradient import GradientMaps
gm = GradientMaps(n_components=2, random_state=0)
gm.fit(correlation_matrix)
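###############################################################################
# The fitted model exposes the gradients and their eigenvalues; a quick sanity
# check (a sketch; shapes assume a square parcel-by-parcel correlation matrix):
print(gm.gradients_.shape)  # (n_parcels, n_components)
print(gm.lambdas_)          # one eigenvalue per component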
###############################################################################
# Visualize results
from brainspace.datasets import load_fsa5
from brainspace.plotting import plot_hemispheres
from brainspace.utils.parcellation import map_to_labels
# Map gradients to original parcels
grad = [None] * 2
for i, g in enumerate(gm.gradients_.T):
    grad[i] = map_to_labels(g, labeling, mask=mask, fill=np.nan)
# Load fsaverage5 surfaces
surf_lh, surf_rh = load_fsa5()
# sphinx_gallery_thumbnail_number = 2
plot_hemispheres(surf_lh, surf_rh, array_name=grad, size=(1200, 400),
                 cmap='viridis_r', color_bar=True,
                 label_text=['Grad1', 'Grad2'], zoom=1.5)
for emb in list_embedding:
    # We ask for 2 gradients
    gm = GradientMaps(n_components=2, approach=emb, kernel='normalized_angle',
                      random_state=0)
    # fit to the connectivity matrix
    gm.fit(conn_matrix)
    # append gradients to the surfaces
    for k in range(2):
        array_name = '{emb}_grad{k}'.format(emb=emb, k=k)
        grad = gm.gradients_[:, k]
        # map the gradient to the parcels (skip non-cortex)
        grad = map_to_labels(grad, labeling, mask=mask_cortex, fill=np.nan)
        # append to hemispheres
        surf_lh.append_array(grad[:n_pts_lh], name=array_name, at='point')
        surf_rh.append_array(grad[n_pts_lh:], name=array_name, at='point')
###############################################################################
# For each embedding approach, we compute 2 gradients and append them to the
# left and right surfaces. Note that we have used 'normalized_angle' to build
# the affinity matrix.
#
# Next, for all embedding approaches, we display the first gradient
array_names = ['pca_grad0', 'le_grad0', 'dm_grad0']
plot_hemispheres(surf_lh, surf_rh, array_name=array_names, interactive=False,
                 embed_nb=True, size=(800, 800), cmap_name='viridis_r')
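###############################################################################
# The 'normalized_angle' affinity used above can also be computed explicitly.
# A minimal sketch with BrainSpace's kernel utilities (assuming `conn_matrix`
# from the fits above; `compute_affinity` mirrors what GradientMaps applies
# internally):
from brainspace.gradient.kernels import compute_affinity

affinity = compute_affinity(conn_matrix, kernel='normalized_angle')
print(affinity.shape)  # square, parcel-by-parcel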
###############################################################################
# Finally, we assess the significance of the correlation between
# myelin/thickness and the first canonical gradient, both without considering
# the spatial auto-correlation in the data and after accounting for it using
# spin permutations.
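###############################################################################
# A minimal sketch of such an assessment (assumes `myelin`, `gradient` and the
# spun gradients from above, all parcel-level and NaN-free; the two-tailed
# p-value is the fraction of null correlations at least as extreme as the
# empirical one):
import numpy as np
from scipy.stats import pearsonr

r_obs, _ = pearsonr(myelin, gradient)  # empirical correlation

# null distribution: correlate myelin with every spun gradient
spins = np.hstack([gradient_spins_lh, gradient_spins_rh])  # (n_spins, n_parcels)
r_null = np.array([pearsonr(myelin, s)[0] for s in spins])

p_spin = np.mean(np.abs(r_null) >= np.abs(r_obs))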
###############################################################################
# Let's visualize the data. We are going to append the data to the surfaces
# and spheres of both hemispheres.
import numpy as np
from brainspace.plotting import plot_hemispheres
from brainspace.utils.parcellation import map_to_labels
# Append data to surfaces and spheres
map_feat = dict(zip(['myelin', 'thickness', 'gradient'],
                    [myelin, thickness, gradient]))
n_pts_lh = surf_lh.n_points
for fn, feat_parc in map_feat.items():
    feat = map_to_labels(feat_parc, parcellation, mask=mask, fill=np.nan)
    surf_lh.append_array(feat[:n_pts_lh], name=fn, at='p')
    surf_rh.append_array(feat[n_pts_lh:], name=fn, at='p')
    sphere_lh.append_array(feat[:n_pts_lh], name=fn, at='p')
    sphere_rh.append_array(feat[n_pts_lh:], name=fn, at='p')
plot_hemispheres(surf_lh, surf_rh,
                 array_name=['myelin', 'thickness', 'gradient'],
                 interactive=False, embed_nb=True, size=(800, 600),
                 cmap_name=['YlOrBr_r', 'PuOr_r', 'viridis'])
###############################################################################
# We can also display the data on the spheres.
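# A call mirroring the surface plot above (a sketch; the same arrays were
# appended to the spheres earlier):
plot_hemispheres(sphere_lh, sphere_rh,
                 array_name=['myelin', 'thickness', 'gradient'],
                 interactive=False, embed_nb=True, size=(800, 600),
                 cmap_name=['YlOrBr_r', 'PuOr_r', 'viridis'])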
gradients_unaligned = [None] * 2
for i in range(2):
    gradients_unaligned[i] = map_to_labels(gp.gradients_[i][:, 0], labeling,
                                           mask=mask, fill=np.nan)
label_text = ['Unaligned Group 1', 'Unaligned Group 2']
plot_hemispheres(surf_lh, surf_rh, array_name=gradients_unaligned,
                 size=(1200, 500), cmap='viridis_r', color_bar=True,
                 label_text=label_text)
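###############################################################################
# `gp` and `gj` are assumed to be GradientMaps instances fitted with Procrustes
# and joint alignment, respectively. A minimal sketch of how they could be
# built (the two group connectivity matrices are hypothetical placeholders):
from brainspace.gradient import GradientMaps

gp = GradientMaps(n_components=2, alignment='procrustes', random_state=0)
gp.fit([conn_group1, conn_group2])  # hypothetical group matrices

gj = GradientMaps(n_components=2, alignment='joint', random_state=0)
gj.fit([conn_group1, conn_group2])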
###############################################################################
# With procrustes alignment
gradients_procrustes = [None] * 2
for i in range(2):
    gradients_procrustes[i] = map_to_labels(gp.aligned_[i][:, 0], labeling,
                                            mask=mask, fill=np.nan)
label_text = ['Procrustes Group 1', 'Procrustes Group 2']
plot_hemispheres(surf_lh, surf_rh, array_name=gradients_procrustes,
                 size=(1200, 500), cmap='viridis_r', color_bar=True,
                 label_text=label_text)
###############################################################################
# With joint alignment
gradients_joint = [None] * 2
for i in range(2):
    gradients_joint[i] = map_to_labels(gj.aligned_[i][:, 0], labeling,
                                       mask=mask, fill=np.nan)
label_text = ['Joint Group 1', 'Joint Group 2']
plot_hemispheres(surf_lh, surf_rh, array_name=gradients_joint, size=(1200, 500),
                 cmap='viridis_r', color_bar=True, label_text=label_text)
###############################################################################
# Note that the default parameters are diffusion embedding approach, 10
# components, and no kernel (use raw data). Once you have your gradients, a
# good first step is to simply inspect what they look like. Let's have a look
# at the first two gradients.
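# For reference, those defaults correspond to an explicit construction like
# this (a sketch):
from brainspace.gradient import GradientMaps

gm_default = GradientMaps(n_components=10, approach='dm', kernel=None)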
import numpy as np
from brainspace.utils.parcellation import map_to_labels
mask = labeling != 0
grad = [None] * 2
for i in range(2):
    # map the gradient to the parcels
    grad[i] = map_to_labels(gm.gradients_[:, i], labeling, mask=mask,
                            fill=np.nan)
plot_hemispheres(surf_lh, surf_rh, array_name=grad, size=(1200, 400),
                 cmap='viridis_r', color_bar=True,
                 label_text=['Grad1', 'Grad2'], zoom=1.55)
###############################################################################
# But which gradients should you keep for your analysis? In some cases you may
# have an a priori interest in some previously defined set of gradients. When
# you do not have a pre-defined set, you can instead look at the lambdas
# (eigenvalues) of each component in a scree plot. Higher eigenvalues (or lower
# in Laplacian eigenmaps) are more important, so one can choose a cut-off based
# on a scree plot.
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, figsize=(5, 4))
ax.scatter(range(gm.lambdas_.size), gm.lambdas_)
ax.set(xlabel='Component Nb', ylabel='Eigenvalue')