import cortex
import numpy as np
np.random.seed(1234)
import matplotlib.pyplot as plt
subject = 'S1'
xfm = 'fullhead'
# Create a random dataset matching the shape of this transform, with one
# entry for each voxel
test_data = np.random.randn(31, 100, 100)
# This creates a Volume object for our test dataset for the given subject
# and transform
vol_data = cortex.Volume(test_data, subject, xfm, vmin=-2, vmax=2)
cortex.quickshow(vol_data)
plt.show()
# Now you can do arithmetic with the Volume
vol_plus = vol_data + 1
cortex.quickshow(vol_plus)
plt.show()
# You can also do multiplication
vol_mult = vol_data * 4
cortex.quickshow(vol_mult)
plt.show()
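# A minimal sketch (not part of the original example): if you want the flatmap
# as an image file rather than an interactive window, you can save the current
# matplotlib figure right after quickshow. The filename is only an illustration.
cortex.quickshow(vol_plus)
plt.savefig('volume_plus_flatmap.png', dpi=150)
plt.close()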
subject = 'S1'
xfm = 'fullhead'
# Create a random dataset matching the shape of this transform, with one
# entry for each voxel
test_data = np.random.randn(31, 100, 100)
# This creates a Volume object for our test dataset for the given subject
# and transform
vol_data = cortex.Volume(test_data, subject, xfm)
cortex.quickshow(vol_data)
plt.show()
# Can also alter the minimum and maximum values shown on the colorbar
vol_data_thresh = cortex.Volume(test_data, subject, xfm, vmin=-1, vmax=1)
cortex.quickshow(vol_data_thresh)
plt.show()
# If you have NaN values, those voxels show up transparent on the brain
test_data[10:15, :, :] = np.nan
vol_data_nan = cortex.Volume(test_data, subject, xfm)
cortex.quickshow(vol_data_nan)
plt.show()
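# A hedged sketch building on the NaN behaviour above: setting sub-threshold
# voxels to NaN is one way to display only "strong" values on the flatmap.
# The 1.0 cutoff is arbitrary and only for illustration.
thresh_data = np.random.randn(31, 100, 100)
thresh_data[np.abs(thresh_data) < 1.0] = np.nan   # hide weak voxels
vol_thresholded = cortex.Volume(thresh_data, subject, xfm, vmin=-2, vmax=2)
cortex.quickshow(vol_thresholded)
plt.show()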
subject = "S1"
xfm = "fullhead"
roi = "EBA"
# Get the map of which voxels are inside of our ROI
roi_masks = cortex.utils.get_roi_masks(subject, xfm,
                                       roi_list=[roi],
                                       gm_sampler='cortical-conservative',  # Select only voxels mostly within cortex
                                       split_lr=False,  # No separate left/right ROIs
                                       threshold=None,  # Leave ROI mask values as probabilities / fractions
                                       return_dict=True
                                       )
# Plot the mask for one ROI onto a flatmap
roi_data = cortex.Volume(roi_masks[roi], subject, xfm,
                         vmin=0,  # This is a probability mask, so
                         vmax=1,  # scale between zero and one
                         cmap="inferno",  # For prettier display
                         )
cortex.quickflat.make_figure(roi_data,
                             thick=1,  # Select a single depth (between white matter & pia)
                             sampler='nearest',  # No interpolation
                             with_curvature=True,
                             with_colorbar=True,
                             )
plt.show()
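# A small illustrative follow-up (not in the original example): since the mask
# holds a probability/fraction per voxel, you can binarize it yourself to pull
# out the data values inside the ROI. The 0.5 cutoff is an assumption.
example_data = np.random.randn(31, 100, 100)
in_roi = roi_masks[roi] > 0.5        # boolean mask of voxels mostly inside EBA
roi_values = example_data[in_roi]    # 1D array of values inside the ROI
print(roi, "contains", in_roi.sum(), "voxels; mean value:", roi_values.mean())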
"""
To move data from voxel space onto the cortical surface, you need a mapper for
the subject and transform you are working with. Pass the voxel volume through
the mapper and you get out a vertex mapping of that data. You can plot both of
these as you normally would.
"""
import cortex
import cortex.polyutils
import numpy as np
np.random.seed(1234)
import matplotlib.pyplot as plt
subject = 'S1'
xfm = 'fullhead'
# First create example voxel data for this subject and transform
voxel_data = np.random.randn(31, 100, 100)
voxel_vol = cortex.Volume(voxel_data, subject, xfm)
# Then we have to get a mapper from voxels to vertices for this transform
mapper = cortex.get_mapper(subject, xfm, 'line_nearest', recache=True)
# Just pass the voxel data through the mapper to get vertex data
vertex_map = mapper(voxel_vol)
# You can plot both as you would normally plot Volume and Vertex data
cortex.quickshow(voxel_vol)
plt.show()
cortex.quickshow(vertex_map)
plt.show()
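# An illustrative sketch, not from the original example: the mapped object holds
# one value per surface vertex, so you can operate on that array with numpy and
# wrap the result in a new cortex.Vertex. Accessing `.data` on the mapped object
# is an assumption about the pycortex Vertex interface.
vertex_values = vertex_map.data                      # per-vertex data array
vertex_abs = cortex.Vertex(np.abs(vertex_values), subject, vmin=0, vmax=2)
cortex.quickshow(vertex_abs)
plt.show()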
# Creating three test datasets that are the same shape as this transform, with
# one entry for each voxel
# The first two are gradients going in different directions across the brain
# and the third is stripes across certain slices of the brain
test1 = np.arange(31. * 100 * 100).reshape((31, 100, 100), order='C')
test2 = np.arange(31. * 100 * 100).reshape((31, 100, 100), order='F')
test3 = np.zeros((31, 100, 100))
test3[::3, :, :] = 1
# Scale the three datasets to lie between 0 and 255
test1_scaled = test1 / np.max(test1) * 255
test2_scaled = test2 / np.max(test2) * 255
test3_scaled = test3 / np.max(test3) * 255
# Creating three cortex.Volume objects with the test data as np.uint8
red = cortex.Volume(test1_scaled.astype(np.uint8), 'S1', 'fullhead')
green = cortex.Volume(test2_scaled.astype(np.uint8), 'S1', 'fullhead')
blue = cortex.Volume(test3_scaled.astype(np.uint8), 'S1', 'fullhead')
# This creates an RGB Volume from the three different color channels for
# this subject
# Note that you do not need to specify the transform when creating this as it
# is already specified in the red, green, and blue channels
vol_data = cortex.VolumeRGB(red, green, blue, subject)
cortex.quickshow(vol_data, with_colorbar=False)
plt.show()
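# A hedged convenience sketch (the helper name is made up for illustration):
# the scaling above assumes the data are non-negative, so a small helper can
# map any array onto the full 0-255 uint8 range before building a color channel.
def to_uint8(arr):
    """Rescale an array to 0-255 and cast to uint8."""
    arr = arr - np.min(arr)
    return (arr / np.max(arr) * 255).astype(np.uint8)

red_u8 = cortex.Volume(to_uint8(test1), 'S1', 'fullhead')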
# Excerpt from a viewer class: __init__ opens a pycortex WebGL viewer for the
# given surface and transform, and run() renders a flatmap PNG in memory and
# POSTs it to a remote address. It assumes `import io`, `import requests`, a
# module-level `logger`, and class attributes such as self.bufferlen,
# self.cmap, self.vmin, self.vmax, self.height and self.address defined
# elsewhere in the class.
def __init__(self, surface, transform, mask_type='thick', vmin=-2., vmax=2.):
    if mask_type == '':
        data = np.zeros((self.bufferlen, 30, 100, 100), 'float32')
    else:
        # Only allocate one value per voxel inside the cortical mask
        npts = cortex.db.get_mask(surface, transform, mask_type).sum()
        data = np.zeros((self.bufferlen, npts), 'float32')
    vol = cortex.Volume(data, surface, transform, vmin=vmin, vmax=vmax)
    logger.debug('Starting pycortex viewer')
    server = cortex.webgl.show(vol, open_browser=False, autoclose=False, port=8051)
    logger.debug('Started pycortex viewer %s %s %s', surface, transform, mask_type)
    view = server.get_client()
    logger.debug('Client connected')
    self.surface = surface
    self.transform = transform
    self.mask_type = mask_type
    self.view = view
    self.active = True
    self.i = 0

def run(self, activity):
    # Wrap the incoming activity array in a Volume and render it to a flatmap
    volume = cortex.Volume(activity, self.surface, self.transform,
                           cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
    buf = io.BytesIO()
    fig = cortex.quickflat.make_figure(volume, height=self.height)
    fig.savefig(buf, format="png")
    buf.seek(0)
    content = buf.read()
    requests.post(self.address, data={"flatmap.png": content})
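# For context, a standalone sketch of what run() does, outside the class:
# render a flatmap into an in-memory PNG buffer. The subject, transform and
# figure height are placeholder values, and no HTTP POST is made here.
import io
activity = np.random.randn(31, 100, 100)
volume = cortex.Volume(activity, 'S1', 'fullhead', vmin=-2, vmax=2)
fig = cortex.quickflat.make_figure(volume, height=512)
buf = io.BytesIO()
fig.savefig(buf, format='png')
png_bytes = buf.getvalue()     # raw PNG bytes, ready to send or write to disk
plt.close(fig)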
import cortex
# First let's do this "manually", using cortex.mni
from cortex import mni
import numpy as np
np.random.seed(1234)
# Compute the transform from this subject's functional space (defined by the
# transform, xfm) to MNI space
s1_to_mni = mni.compute_mni_transform(subject='S1', xfm='fullhead')
# s1_to_mni is a 4x4 array describing the transformation in homogeneous coordinates
# Transform data from subject to MNI space
# first we will create a dataset to transform
data = cortex.Volume.random('S1', 'fullhead')
# then transform it!
mni_data = mni.transform_to_mni(data, s1_to_mni)
# mni_data is a nibabel Nifti1Image
mni_data_vol = mni_data.get_fdata()  # the actual array, shape=(182, 218, 182)
# That was the manual method. pycortex can also cache these transforms for you
# if you get them using the pycortex database
s1_to_mni_db = cortex.db.get_mnixfm('S1', 'fullhead')
# this is the same as s1_to_mni, but will return instantly on subsequent calls
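# A brief follow-up sketch (not in the original snippet): the cached transform
# can be used exactly like the manual one, and the resulting MNI-space image is
# a nibabel object that can be written to disk. The output filename is only an
# illustration.
import nibabel as nib
mni_data_cached = mni.transform_to_mni(data, s1_to_mni_db)
nib.save(mni_data_cached, 'S1_fullhead_in_mni.nii.gz')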