# (scraped banner, commented out so the file parses) Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Now, we compute the gradients using 3 different embedding approaches: PCA,
# Laplacian embeddings (i.e., 'le') and Diffusion maps (i.e., 'dm')
import numpy as np
from brainspace.gradient import GradientMaps
from brainspace.utils.parcellation import map_to_labels
# List of embedding approaches to compare.
list_embedding = ['pca', 'le', 'dm']
# Non-zero labels mark cortical parcels; zero is background / medial wall.
mask_cortex = labeling != 0
for emb in list_embedding:
    # We ask for 2 gradients. NOTE: the GradientMaps parameter is
    # `n_components` (the original `n_gradients` raises a TypeError).
    gm = GradientMaps(n_components=2, approach=emb, kernel='normalized_angle',
                      random_state=0)
    # Fit to the connectivity matrix.
    gm.fit(conn_matrix)
    # Append each gradient to the surfaces.
    for k in range(2):
        array_name = '{emb}_grad{k}'.format(emb=emb, k=k)
        grad = gm.gradients_[:, k]
        # Map the parcel-level gradient back to vertices (non-cortex -> NaN).
        grad = map_to_labels(grad, labeling, mask=mask_cortex, fill=np.nan)
        # Append to the left hemisphere; the first n_pts_lh vertices belong
        # to it. NOTE(review): the right hemisphere is never appended here —
        # presumably intentional for this snippet, but worth confirming.
        surf_lh.append_array(grad[:n_pts_lh], name=array_name, at='point')
# Keep only the region names selected by the mask.
# NOTE(review): assumes `mat_mask` holds integer indices into `regions_list`
# and `plotting` is nilearn.plotting — neither is visible in this chunk.
masked_regions = [regions_list[i] for i in mat_mask]
# Plot the (masked) correlation matrix `c` with a symmetric color range.
corr_plot = plotting.plot_matrix(c, figure=(15, 15), labels=masked_regions,
vmax=0.8, vmin=-0.8, reorder=True)
###############################################################################
# Run gradient analysis and visualize
# +++++++++++++++++++++++++++++++++++
#
# Run gradient analysis
from brainspace.gradient import GradientMaps

# Decompose the correlation matrix into its first two gradients,
# seeding the embedding for reproducibility.
gm = GradientMaps(random_state=0, n_components=2)
gm.fit(correlation_matrix)
###############################################################################
# Visualize results
from brainspace.datasets import load_fsa5
from brainspace.plotting import plot_hemispheres
from brainspace.utils.parcellation import map_to_labels
# Map gradients to original parcels
# Map each gradient (columns of gm.gradients_) back onto the parcellation;
# vertices outside `mask` are filled with NaN. The assignment line had lost
# its loop indentation, which was a syntax error.
grad = [None] * 2
for i, g in enumerate(gm.gradients_.T):
    grad[i] = map_to_labels(g, labeling, mask=mask, fill=np.nan)
# Load fsaverage5 surfaces
# NOTE(review): orphaned statement — hides the y-axis of subplot `ax[i]`;
# the enclosing figure/loop setup is not visible in this chunk.
ax[i].yaxis.set_visible(False)
###############################################################################
# Now, we use our GradientMaps class to build one gradient for each connectivity
# matrix. Gradients are then appended to the surfaces.
import numpy as np
from brainspace.gradient import GradientMaps
from brainspace.utils.parcellation import map_to_labels
# One gradient per parcellation scale; remember each array name for plotting.
name_gradients = [None] * n_parcellations
for i, cm in enumerate(conn_matrices):
    # We ask for a single gradient. NOTE: the GradientMaps parameter is
    # `n_components` (the original `n_gradients` raises a TypeError).
    gm = GradientMaps(n_components=1, approach='dm', kernel='normalized_angle',
                      random_state=0)
    # Fit to this scale's connectivity matrix.
    gm.fit(cm)
    array_name = 'grad0_Schaefer{0}'.format(list_parcels[i])
    name_gradients[i] = array_name
    grad = gm.gradients_[:, 0]
    # Map the gradient back to vertices (label 0 / non-cortex -> NaN).
    grad = map_to_labels(grad, labelings[i], mask=labelings[i] != 0,
                         fill=np.nan)
    # Append to hemispheres.
    # NOTE(review): only the print remains here — the append_array call
    # appears to have been lost from this scraped snippet; confirm upstream.
    print("Appending '%s'" % array_name)
# Render the gradient arrays on both hemispheres.
# NOTE(review): `gradients_embedding` and `label_text` are not defined in
# this chunk — presumably set in the surrounding example; verify.
plot_hemispheres(surf_lh, surf_rh, array_name=gradients_embedding, size=(1200, 800),
cmap='viridis_r', color_bar=True, label_text=label_text)
###############################################################################
# Gradient alignment
# +++++++++++++++++++
#
# A more principled way of increasing comparability across gradients are
# alignment techniques. BrainSpace provides two alignment techniques:
# Procrustes analysis, and joint alignment. For this example we will load
# functional connectivity data of a second subject group and align it with the
# first group.
# Load a second (hold-out) group and fit both alignment variants on the
# pair of connectivity matrices.
conn_matrix2 = load_group_fc('schaefer', scale=400, group='holdout')

gp = GradientMaps(alignment='procrustes', kernel='normalized_angle')
gj = GradientMaps(alignment='joint', kernel='normalized_angle')
for model in (gp, gj):
    model.fit([conn_matrix, conn_matrix2])
###############################################################################
# Here, `gp` contains the Procrustes aligned data and `gj` contains the joint
# aligned data. Let’s plot them, but in separate figures to keep things
# organized.
# First gradient from original and holdout data, without alignment
# First gradient of each group, mapped to vertices (NaN outside the mask).
# The assignment line had lost its loop indentation (syntax error).
gradients_unaligned = [None] * 2
for i in range(2):
    gradients_unaligned[i] = map_to_labels(gp.gradients_[i][:, 0], labeling,
                                           mask=mask, fill=np.nan)
###############################################################################
# Let’s first look at the parcellation scheme we’re using.
from brainspace.plotting import plot_hemispheres

# Show the parcel labels on both hemispheres with a categorical colormap.
plot_hemispheres(surf_lh, surf_rh, array_name=labeling,
                 cmap='tab20', size=(1200, 200), zoom=1.85)
###############################################################################
# and let’s construct our gradients.
from brainspace.gradient import GradientMaps

# Fit the default ten gradients, seeded for reproducibility.
gm = GradientMaps(random_state=0, n_components=10)
gm.fit(conn_matrix)
###############################################################################
# Note that the default parameters are the diffusion embedding approach, 10
# components, and no kernel (the raw data are used). Once you have your
# gradients, a good first step is to simply inspect what they look like.
# Let's have a look at the first two gradients.
import numpy as np
from brainspace.utils.parcellation import map_to_labels

# Non-zero labels are cortical parcels; zero marks background / medial wall.
mask = labeling != 0
# One slot per gradient we are about to map back to the surface.
grad = [None] * 2
###############################################################################
# Let’s first look at the parcellation scheme we’re using.
from brainspace.plotting import plot_hemispheres
# Display the parcellation labels on both hemispheres (categorical colormap).
plot_hemispheres(surf_lh, surf_rh, array_name=labeling, size=(1200, 200),
cmap='tab20', zoom=1.85)
###############################################################################
# and let’s construct our gradients.
from brainspace.gradient import GradientMaps
# Ask for 10 gradients (default), with a fixed seed for reproducibility.
gm = GradientMaps(n_components=10, random_state=0)
gm.fit(conn_matrix)
###############################################################################
# Note that the default parameters are diffusion embedding approach, 10
# components, and no kernel (use raw data). Once you have your gradients, a
# good first step is to simply inspect what they look like. Let’s have a look
# at the first two gradients.
import numpy as np
from brainspace.utils.parcellation import map_to_labels
# Cortical mask: non-zero labels are parcels, zero is background.
mask = labeling != 0
# Placeholders for the two gradients to be mapped to vertices.
grad = [None] * 2
cmap='viridis_r', color_bar=True, label_text=label_text)
###############################################################################
# Gradient alignment
# +++++++++++++++++++
#
# A more principled way of increasing comparability across gradients are
# alignment techniques. BrainSpace provides two alignment techniques:
# Procrustes analysis, and joint alignment. For this example we will load
# functional connectivity data of a second subject group and align it with the
# first group.
# Hold-out group at the same Schaefer-400 scale.
conn_matrix2 = load_group_fc('schaefer', scale=400, group='holdout')
# Procrustes alignment vs. joint embedding of the two groups.
gp = GradientMaps(kernel='normalized_angle', alignment='procrustes')
gj = GradientMaps(kernel='normalized_angle', alignment='joint')
gp.fit([conn_matrix, conn_matrix2])
gj.fit([conn_matrix, conn_matrix2])
###############################################################################
# Here, `gp` contains the Procrustes aligned data and `gj` contains the joint
# aligned data. Let’s plot them, but in separate figures to keep things
# organized.
# First gradient from original and holdout data, without alignment
# First gradient of each group, mapped to vertices (NaN outside the mask).
# The assignment line had lost its loop indentation (syntax error).
gradients_unaligned = [None] * 2
for i in range(2):
    gradients_unaligned[i] = map_to_labels(gp.gradients_[i][:, 0], labeling,
                                           mask=mask, fill=np.nan)
# gradients. However, you should always inspect the output of an alignment;
# if the input data are sufficiently dissimilar then the alignment may produce
# odd results.
#
#
# In some instances, you may want to align gradients to an out-of-sample
# gradient, for example when aligning individuals to a hold-out group gradient.
# When performing a Procrustes alignment, a 'reference' can be specified.
# The first alignment iteration will then be to the reference. For purposes of
# this example, we will use the gradient of the hold-out group as the
# reference.
# Gradients of the hold-out group serve as the alignment reference.
gref = GradientMaps(approach='le', kernel='normalized_angle')
gref.fit(conn_matrix2)

# Procrustes-align the main group's gradients to that reference.
galign = GradientMaps(approach='le', alignment='procrustes',
                      kernel='normalized_angle')
galign.fit(conn_matrix, reference=gref.gradients_)
###############################################################################
# The gradients in `galign.aligned_` are now aligned to the reference
# gradients.
#
# Gradient fusion
# +++++++++++++++++++
# We can also fuse data across multiple modalities and build multi-modal
# gradients. In this case we only look at one set of output gradients,
# rather than one per modality.
#
# First, let's load the example data of microstructural profile covariance
# (Paquola et al., 2019) and functional connectivity.
max_rk = min(max_rk)
for j, a in enumerate(args):
m = masks[j]
a[m] = minmax_scale(a[m], feature_range=(1, max_rk))
return np.hstack(args)
# Fuse the functional-connectivity and microstructural matrices into one
# long horizontal array. NOTE(review): `fusion`, `fc` and `mpc` are defined
# outside this chunk — `fusion` appears to be the rank-rescaling helper
# whose body is partially visible above.
fused_matrix = fusion(fc, mpc)
###############################################################################
# We then use this output in the fit function. This will convert the long
# horizontal array into a square affinity matrix, and then perform embedding.
# Embed the fused matrix: fit() converts the long horizontal array into a
# square affinity matrix before computing two gradients.
gm = GradientMaps(n_components=2, kernel='normalized_angle')
gm.fit(fused_matrix)
# Map both fused gradients back to vertices (NaN outside the mask). The
# assignment line had lost its loop indentation, which was a syntax error.
gradients_fused = [None] * 2
for i in range(2):
    gradients_fused[i] = map_to_labels(gm.gradients_[:, i], labeling,
                                       mask=mask, fill=np.nan)
plot_hemispheres(surf_lh, surf_rh, array_name=gradients_fused,
                 label_text=['Gradient 1', 'Gradient 2'], size=(1200, 500),
                 color_bar=True, cmap='viridis')
###############################################################################
# Although in this example, we don't see any big differences, if the input data
# was less similar, alignments may also resolve changes in the order of the
# gradients. However, you should always inspect the output of an alignment;
# if the input data are sufficiently dissimilar then the alignment may produce
# odd results.
#
#
# In some instances, you may want to align gradients to an out-of-sample
# gradient, for example when aligning individuals to a hold-out group gradient.
# When performing a Procrustes alignment, a 'reference' can be specified.
# The first alignment iteration will then be to the reference. For purposes of
# this example, we will use the gradient of the hold-out group as the
# reference.
# Laplacian-eigenmap gradients of the hold-out group as reference.
gref = GradientMaps(kernel='normalized_angle', approach='le')
gref.fit(conn_matrix2)
# Procrustes alignment of the main group toward the reference gradients.
galign = GradientMaps(kernel='normalized_angle', approach='le', alignment='procrustes')
galign.fit(conn_matrix, reference=gref.gradients_)
###############################################################################
# The gradients in `galign.aligned_` are now aligned to the reference
# gradients.
#
# Gradient fusion
# +++++++++++++++++++
# We can also fuse data across multiple modalities and build multi-modal
# gradients. In this case we only look at one set of output gradients,
# rather than one per modality.
#