# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
###############################################################################
# Generate mask with a high lower cutoff
#
# The NiftiMasker calls the nilearn.masking.compute_epi_mask function to
# compute the mask from the EPI. Its two key parameters, lower_cutoff and
# upper_cutoff, bound the grey-value range (0 = image minimum, 1 = image
# maximum) within which the masking algorithm searches for its threshold.
# Raising the lower cutoff restricts the mask to those voxels that appear
# bright in the EPI image.
masker = NiftiMasker(mask_strategy='epi',
                     mask_args=dict(upper_cutoff=.9,
                                    lower_cutoff=.8,
                                    opening=False))
masker.fit(epi_img)
plot_roi(masker.mask_img_, mean_img, title='EPI Mask: high lower_cutoff')
###############################################################################
# Computing the mask from the MNI template
###############################################################################
#
# Alternatively, the mask can be derived from the MNI gray matter template;
# in that case it is resampled onto the target image.
masker = NiftiMasker(mask_strategy='template')
masker.fit(epi_img)
plot_roi(masker.mask_img_, mean_img, title='Mask from template')
###############################################################################
# Render the similarity map, provided it holds any valid (non-NaN) values.
#
# Bug fix: the original guard ``(imgarray == np.nan).all()`` is always False,
# because NaN never compares equal to anything (including itself); use
# ``np.isnan`` to detect an all-NaN array.
# NOTE(review): the nesting below is reconstructed from a whitespace-mangled
# snippet — confirm the threshold/plotting statements belong inside ``else``.
if np.isnan(imgarray).all():
    print("No Valid Results")
else:
    if threshold is not None:
        # Reload the raw data, apply the threshold correction, and rebuild
        # the image with its original affine.
        imgarray = nib.load(img).get_data()
        affine = get_affine(img)
        imgarray = correct_by_threshold(imgarray, threshold)
        img = nib.Nifti1Image(imgarray, affine)
    # 'r' maps are shown on a [0.1, 1] scale; 't' maps on [-7, 7].
    if type == 'r':
        plotting.plot_roi(roi_img=img, bg_img=background, threshold=0,
                          vmin=0.1, vmax=1, title="Similarity",
                          resampling_interpolation="continuous")
    if type == 't':
        plotting.plot_roi(roi_img=img, bg_img=background, threshold=0,
                          vmin=-7, vmax=7, title="Similarity",
                          resampling_interpolation="continuous")
    plt.show()
#############################################################################
# MASSP Parcellation
# ---------------------
# Finally, the MASSP algorithm is applied to parcellate the subcortex.
massp = nighres.parcellation.massp(
    target_images=[dataset['qr1'], dataset['qr2s'], dataset['qsm']],
    map_to_target=ants['inverse'],
    max_iterations=120, max_difference=0.1,
    save_data=True, file_name="sample-subject",
    output_dir=out_dir, overwrite=False)
############################################################################
# Inspect the MASSP outputs: the maximum-label parcellation and the
# maximum-probability map.
if not skip_plots:
    plotting.plot_roi(massp['max_label'], dataset['qr1'],
                      annotate=False, black_bg=False,
                      draw_cross=False, cmap='cubehelix')
    plotting.plot_img(massp['max_proba'],
                      vmin=0, vmax=1, cmap='gray', colorbar=True,
                      annotate=False, draw_cross=False)
############################################################################
#############################################################################
# If the example is not run in a jupyter notebook, render the plots:
if not skip_plots:
    plotting.show()
# .. tip:: Nighres functions with several outputs return a dictionary of
#    those outputs. The keys are listed in the docstring (type
#    ``nighres.brain.mp2rage_skullstripping?``) or can be enumerated with
#    ``skullstripping_results.keys()``
#
# To verify that the skull stripping worked well, overlay the brain mask on
# the original image. The images stored in ``out_dir`` can also be opened
# in any interactive viewer and scrolled through.
#
# Like Nilearn, Nighres passes data internally as Nibabel SpatialImage
# objects, so the outputs plot directly with `Nilearn plotting functions
# `_
# .
if not skip_plots:
    plotting.plot_roi(skullstripping_results['brain_mask'], dataset['t1w'],
                      annotate=False, black_bg=False,
                      draw_cross=False, cmap='autumn')
############################################################################
# .. image:: ../_static/tissue_classification1.png
#############################################################################
#############################################################################
# MGDM classification
# ---------------------
# Next, we use the masked data as input for tissue classification with the MGDM
# algorithm. MGDM works with a single contrast, but can be improved with
# additional contrasts. In this case we use the T1-weighted image as well as
# the quantitative T1map.
# NOTE(review): this call is truncated here — the remaining keyword
# arguments and the closing parenthesis lie outside this chunk.
mgdm_results = nighres.brain.mgdm_segmentation(
contrast_image1=skullstripping_results['t1w_masked'],
See http://nilearn.github.io/manipulating_images/input_output.html
The mask image; it could be binary mask or an atlas or ROIs
with integer values.
bg_img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
The background image that the mask will be plotted on top of.
To turn off background image, just pass "bg_img=None".
Returns
-------
mask_plot_svg: str
SVG Image Data URL for the mask plot.
"""
# Render the mask (if one was given) over the background, then capture the
# current matplotlib figure as an SVG data URL.
if mask_img:
    mask_plot = plot_roi(roi_img=mask_img,
                         bg_img=bg_img,
                         display_mode='z',
                         cmap='Set1',
                         )
    mask_plot  # So flake8 doesn't complain about not using variable (F841)
    mask_plot_svg = plot_to_svg(plt.gcf())
    # Close the figure:
    # prevents sphinx-gallery & jupyter from scraping & inserting plots
    plt.close()
else:
    # No mask: return None so the caller's HTML falls back to the image
    # tag's alt attribute.
    mask_plot_svg = None  # HTML image tag's alt attribute is used.
return mask_plot_svg
# import os
# region_labels.to_filename(os.path.join(folder_path,
#                                        'relabeled_yeo_atlas.nii.gz'))
##############################################################################
# Different connectivity modes
# -----------------------------
#
# With connect_diag=False, regions that touch only along the diagonal are
# additionally split apart.
region_labels_not_diag = connected_label_regions(atlas_yeo,
                                                 connect_diag=False)
plotting.plot_roi(region_labels_not_diag,
                  title='Relabeling and connect_diag=False',
                  cut_coords=(8, -4, 9), colorbar=True, cmap='Paired')
##############################################################################
# A consequence of using connect_diag=False is that we can get a lot of
# small regions, around 110 judging from the colorbar.
#
# Hence we suggest use connect_diag=True
##############################################################################
# Parameter min_size
# -------------------
#
# In the above, we get around 110 regions, but many of these are very
# small. We can remove them with the min_size parameter, keeping only the
# **Identification of connected components** - The function
# :func:`scipy.ndimage.label` from the scipy Python library identifies
# immediately neighboring voxels in our voxels mask. It assigns a separate
# integer label to each one of them.
labels, n_labels = ndimage.label(dil_bin_p_values_and_vt)
# Extract the component carrying label 5 as the first ROI.
# Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
# documented drop-in replacement.
first_roi_data = (labels == 5).astype(int)
# Likewise, the component carrying label 3 becomes the second ROI.
second_roi_data = (labels == 3).astype(int)
# Visualizing the connected components
# First, we create a Nifti image type from first roi data in a array
first_roi_img = new_img_like(fmri_img, first_roi_data)
# Then, visualize the same created Nifti image in first argument and mean of
# functional images as background (second argument), cut_coords is default now
# and coordinates are selected automatically pointed exactly on the roi data
plot_roi(first_roi_img, mean_img, title='Connected components: first ROI')
# we do the same for second roi data
second_roi_img = new_img_like(fmri_img, second_roi_data)
# Visualization goes here with second roi image and cut_coords are default with
# coordinates selected automatically pointed on the data
plot_roi(second_roi_img, mean_img, title='Connected components: second ROI')
##############################################################################
# Use the new ROIs, to extract data maps in both ROIs
# We extract data from ROIs using nilearn's NiftiLabelsMasker
from nilearn.input_data import NiftiLabelsMasker
# Before data extraction, we convert an array labels to Nifti like image. All
# inputs to NiftiLabelsMasker must be Nifti-like images or filename to Nifti
# images. We use the same reference image as used above in previous sections
# We use ndimage function from scipy Python library for mask dilation
from scipy import ndimage
# Input here is a binarized and intersected mask data from previous section
dil_bin_p_values_and_vt = ndimage.binary_dilation(bin_p_values_and_vt)
# Convert the dilated array to a Nifti image (same reference image as above)
# so it can be plotted.
# Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
# documented drop-in replacement.
dil_bin_p_values_and_vt_img = new_img_like(
    fmri_img,
    dil_bin_p_values_and_vt.astype(int))
# Visualization goes here without 'L', 'R' annotation and coordinates being the
# same
plot_roi(dil_bin_p_values_and_vt_img, mean_img,
         title='Dilated mask', cut_coords=cut_coords,
         annotate=False)
#############################################################################
# Finally, we end with splitting the connected ROIs to two hemispheres into
# two separate regions (ROIs), using the function `scipy.ndimage.label` from
# the scipy Python library.
##############################################################################
# **Identification of connected components** - The function
# :func:`scipy.ndimage.label` from the scipy Python library identifies
# immediately neighboring voxels in our voxels mask. It assigns a separate
# integer label to each one of them.
labels, n_labels = ndimage.label(dil_bin_p_values_and_vt)
# Extract the component carrying label 5 as the first ROI.
# Bug fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
# documented drop-in replacement.
first_roi_data = (labels == 5).astype(int)
# NOTE(review): this snippet is truncated — the second-ROI extraction that
# the original comment announced lies outside this chunk.
###############################################################################
# Plotting anatomical images with function `plot_anat`
# -----------------------------------------------------
#
# Show the anatomical image of the haxby dataset.
plotting.plot_anat(haxby_anat_filename, title="plot_anat")
###############################################################################
# Plotting ROIs (here the mask) with function `plot_roi`
# -------------------------------------------------------
#
# Overlay the ventral temporal region mask from the haxby dataset on the
# subject-specific anatomical image; coordinates are positioned
# automatically on the region of interest (roi).
plotting.plot_roi(haxby_mask_filename,
                  bg_img=haxby_anat_filename,
                  title="plot_roi")
###############################################################################
# Plotting EPI image with function `plot_epi`
# ---------------------------------------------
# Import image processing tool
from nilearn import image
# Collapse the 4D functional image to a 3D voxel-wise mean across time.
mean_haxby_img = image.mean_img(haxby_func_filename)
# Visualize the resulting mean image (3D).
plotting.plot_epi(mean_haxby_img, title="plot_epi")
# them with ``skullstripping_results.keys()``
#
# To check that the skull stripping worked well, overlay each brain mask on
# its original image. The images stored in ``out_dir`` can also be opened
# in any interactive viewer and scrolled through.
#
# Like Nilearn, Nighres passes data internally as Nibabel SpatialImage
# objects, so the outputs plot directly with `Nilearn plotting functions
# `_
# .
if not skip_plots:
    plotting.plot_roi(skullstripping_results1['brain_mask'],
                      dataset1['t1map'], annotate=False, black_bg=False,
                      draw_cross=False, cmap='autumn')
    plotting.plot_roi(skullstripping_results2['brain_mask'],
                      dataset2['t1w'], annotate=False, black_bg=False,
                      draw_cross=False, cmap='autumn')
############################################################################
#############################################################################
# SyN co-registration
# --------------------
# Next, we use the masked data as input for co-registration. The T1 maps are
# used here as they are supposed to be more similar
# NOTE(review): this call is truncated here — the remaining keyword
# arguments and the closing parenthesis lie outside this chunk.
syn_results = nighres.registration.embedded_antsreg(
source_image=skullstripping_results1['t1map_masked'],
target_image=skullstripping_results2['t1map_masked'],
run_rigid=True, run_syn=True,
rigid_iterations=1000, coarse_iterations=40,
medium_iterations=0, fine_iterations=0,