# Presumed module-level imports for this fragment
import numpy as np
import pandas as pd
from nistats import datasets as nistats_datasets
from nistats.first_level_model import FirstLevelModel

def report_flm_fiac():  # pragma: no cover
    data = nistats_datasets.fetch_fiac_first_level()
    fmri_img = [data['func1'], data['func2']]

    from nilearn.image import mean_img
    mean_img_ = mean_img(fmri_img[0])

    design_files = [data['design_matrix1'], data['design_matrix2']]
    design_matrices = [pd.DataFrame(np.load(df)['X']) for df in design_files]

    fmri_glm = FirstLevelModel(mask_img=data['mask'], minimize_memory=True)
    fmri_glm = fmri_glm.fit(fmri_img, design_matrices=design_matrices)

    n_columns = design_matrices[0].shape[1]
    contrasts = {
        'SStSSp_minus_DStDSp': _pad_vector([1, 0, 0, -1], n_columns),
        'DStDSp_minus_SStSSp': _pad_vector([-1, 0, 0, 1], n_columns),
        'DSt_minus_SSt': _pad_vector([-1, -1, 1, 1], n_columns),
        'DSp_minus_SSp': _pad_vector([-1, 1, -1, 1], n_columns),
        'DSt_minus_SSt_for_DSp': _pad_vector([0, -1, 0, 1], n_columns),
        'DSp_minus_SSp_for_DSt': _pad_vector([0, 0, -1, 1], n_columns),
    }
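# The fragment above relies on a `_pad_vector` helper that is not shown here.
# A minimal sketch of what it presumably does (an assumption, not the original
# implementation): right-pad each contrast with zeros up to the number of
# design-matrix columns.
def _pad_vector(contrast_, n_columns):
    """Hypothetical reimplementation: append zeros to a contrast vector."""
    return np.hstack((contrast_, np.zeros(n_columns - len(contrast_))))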
tick_position = tick_position + .09
plt.ylabel('Classification accuracy (f1 score)')
plt.xlabel('Visual stimuli category')
plt.ylim(bottom=0)
plt.legend(loc='lower center', ncol=3)
plt.title(
    'Category-specific classification accuracy for different classifiers')
plt.tight_layout()
###############################################################################
# Finally, we plot the face vs house map for the different classifiers
# Use the average EPI as a background
from nilearn import image
mean_epi_img = image.mean_img(func_filename)
# Restrict the decoding to face vs house
condition_mask = stimuli.isin(['face', 'house'])
masked_timecourses = masked_timecourses[
    condition_mask[task_mask]]
# Transform the stimuli to binary values (face -> 1, house -> 0)
stimuli = (stimuli[condition_mask] == 'face').astype(int)
from nilearn.plotting import plot_stat_map, show
for classifier_name, classifier in sorted(classifiers.items()):
    classifier.fit(masked_timecourses, stimuli)
    if hasattr(classifier, 'coef_'):
        weights = classifier.coef_[0]
condition_mask_train = np.logical_and(condition_mask, labels['chunks'] <= 6)
condition_mask_test = np.logical_and(condition_mask, labels['chunks'] > 6)
# Apply this sample mask to X (fMRI data) and y (behavioral labels)
# Because the data is in one single large 4D image, we need to use
# index_img to do the split easily
from nilearn.image import index_img
func_filenames = data_files.func[0]
X_train = index_img(func_filenames, condition_mask_train)
X_test = index_img(func_filenames, condition_mask_test)
y_train = target[condition_mask_train]
y_test = target[condition_mask_test]
# Compute the mean epi to be used for the background of the plotting
from nilearn.image import mean_img
background_img = mean_img(func_filenames)
##############################################################################
# Fit SpaceNet with a Graph-Net penalty
from nilearn.decoding import SpaceNetClassifier
# Fit model on train data and predict on test data
decoder = SpaceNetClassifier(memory="nilearn_cache", penalty='graph-net')
decoder.fit(X_train, y_train)
y_pred = decoder.predict(X_test)
accuracy = (y_pred == y_test).mean() * 100.
print("Graph-net classification accuracy : %g%%" % accuracy)
# Visualization
from nilearn.plotting import plot_stat_map, show
coef_img = decoder.coef_img_
plot_stat_map(coef_img, background_img)
###############################################################################
# From raw EPI data
# Load the NYU resting-state dataset
import nibabel
from nilearn import datasets, image
from nilearn.input_data import NiftiMasker
from nilearn.plotting import plot_roi
nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1)
nyu_filename = nyu_dataset.func[0]
nyu_img = nibabel.load(nyu_filename)
# Restrict nyu to 100 frames to speed up computation
from nilearn.image import index_img
nyu_img = index_img(nyu_img, slice(0, 100))
# To display the background
nyu_mean_img = image.mean_img(nyu_img)
# Simple mask extraction from EPI images
# We need to specify an 'epi' mask_strategy, as this is raw EPI data
masker = NiftiMasker(mask_strategy='epi')
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI automatic mask')
# Generate mask with strong opening
masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10))
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask with strong opening')
# Generate mask with a high lower cutoff
masker = NiftiMasker(mask_strategy='epi',
                     mask_args=dict(upper_cutoff=.9, lower_cutoff=.8))
plotting.plot_stat_map(localizer_tmap_filename, display_mode='yz',
                       cut_coords=[-27, 60],
                       title="display_mode='yz', cut_coords=[-27, 60]")
###############################################################################
# In the second part, we demonstrate the various add_* features of nilearn,
# each of which is helpful for overlaying brain imaging results for further
# interpretation.
# Import image processing tool for basic processing of functional brain image
from nilearn import image
# Compute the voxel-wise mean functional image across the time dimension.
# The result is a 3D functional image assigned to mean_haxby_img.
mean_haxby_img = image.mean_img(haxby_func_filename)
########################################
# Now let us see how to use `add_edges`, a method useful for checking
# coregistration by overlaying the anatomical image as edges (in red) on top
# of the mean functional image (background), both from the same subject.
# First, we call the `plot_anat` plotting function with a background image
# as its first argument, in this case the mean fMRI image.
display = plotting.plot_anat(mean_haxby_img, title="add_edges")
# We can now use the add_edges method of the display object returned above.
# Its first argument is the anatomical image; by default the edges are drawn
# in red ('r'), while green ('g') or blue ('b') can be chosen instead.
display.add_edges(haxby_anat_filename)
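# For example, to draw the edges in green rather than the default red, the
# color argument can be passed to add_edges (a usage sketch based on the
# comment above):
display_green = plotting.plot_anat(mean_haxby_img, title="add_edges in green")
display_green.add_edges(haxby_anat_filename, color='g')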
########################################
print(B0)
B0_bbr = f"{preproc_dir}/{str(B0)}_B0.nii.gz"
cmd = f"fslroi {dwi_file} {B0_bbr} {str(B0)} 1"
cmds.append(cmd)
B0s_bbr.append(B0_bbr)
for cmd in cmds:
    print(cmd)
    run(cmd)
# Get mean B0
B0s_bbr_imgs = []
for B0 in B0s_bbr:
    B0s_bbr_imgs.append(nib.load(B0))
mean_B0 = mean_img(B0s_bbr_imgs)
nib.save(mean_B0, nodif_B0)
# Get mean B0 brain mask
cmd = f"bet {nodif_B0} {nodif_B0_bet} -m -f 0.2"
run(cmd)
return gtab, nodif_B0, nodif_B0_mask
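# The `run` helper used above is not defined in this fragment; a minimal
# sketch, assuming it simply executes the shell command and raises on failure:
import subprocess

def run(cmd):
    """Hypothetical helper: execute a shell command, failing loudly on error."""
    subprocess.run(cmd, shell=True, check=True)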
# Retrieve the SVC discriminating weights
coef_ = svc.coef_
# Reverse masking thanks to the Nifti Masker
coef_img = nifti_masker.inverse_transform(coef_)
# Save the coefficients as a Nifti image
coef_img.to_filename('haxby_svc_weights.nii')
###########################################################################
# Visualize the discriminating weights over the mean EPI
from nilearn.image import mean_img
from nilearn.plotting import plot_roi, plot_stat_map, show
mean_epi = mean_img(func_filename)
plot_stat_map(coef_img, mean_epi, title="SVM weights", display_mode="yx")
###########################################################################
# Plot also the mask that was computed by the NiftiMasker
plot_roi(nifti_masker.mask_img_, mean_epi, title="Mask", display_mode="yx")
show()
display_mode: string (optional, defaults to 'ortho')
    display mode passed on to the underlying plot_img call
cmap: matplotlib colormap object (optional)
    colormap to use for the plots; if not given, a gray colormap is used
output_filename: string (optional)
    path where the plot will be stored
"""
# sanity
if cmap is None:
    cmap = plt.cm.gray  # registration QA always gray cmap!
reference_img = mean_img(reference_img)
coregistered_img = mean_img(coregistered_img)
if cut_coords is None:
    cut_coords = (-10, -28, 17)
if display_mode in ['x', 'y', 'z']:
    cut_coords = (cut_coords['xyz'.index(display_mode)],)
# XXX nilearn complains about rotations in affine, etc.
coregistered_img = reorder_img(coregistered_img, resample="continuous")
_slicer = plot_img(coregistered_img, cmap=cmap, cut_coords=cut_coords,
                   display_mode=display_mode, black_bg=True)
# XXX nilearn complains about rotations in affine, etc.
reference_img = reorder_img(reference_img, resample="continuous")
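# A usage sketch for this QA helper (the function name and file paths below
# are hypothetical, not taken from this fragment):
# plot_registration("anat.nii.gz", "mean_func.nii.gz",
#                   display_mode='z', output_filename="coreg_qa.png")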
from sklearn.feature_selection import f_regression
_, pvals_bonferroni = f_regression(
    grouped_fmri_masked,
    grouped_conditions_encoded)  # f_regression implicitly adds intercept
pvals_bonferroni *= fmri_masked.shape[1]
pvals_bonferroni[np.isnan(pvals_bonferroni)] = 1
pvals_bonferroni[pvals_bonferroni > 1] = 1
neg_log_pvals_bonferroni = -np.log10(pvals_bonferroni)
neg_log_pvals_bonferroni_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_bonferroni)
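# To make the Bonferroni step above concrete: multiply each p-value by the
# number of tests and clip at 1, just as done for pvals_bonferroni. A tiny
# self-contained example on toy p-values (illustrative numbers only):
import numpy as np
toy_pvals = np.array([0.001, 0.02, 0.5])
n_tests = toy_pvals.shape[0]
toy_bonferroni = np.minimum(toy_pvals * n_tests, 1.)
print(-np.log10(toy_bonferroni))  # approximately [2.52, 1.22, 0.]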
### Visualization #############################################################
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
# Use the fmri mean image as a surrogate of anatomical data
from nilearn import image
mean_fmri_img = image.mean_img(func_filename)
# Various plotting parameters
z_slice = -17 # plotted slice
from numpy import linalg
from nilearn.image.resampling import coord_transform
affine = signed_neg_log_pvals_unmasked.affine
_, _, k_slice = coord_transform(0, 0, z_slice,
                                linalg.inv(affine))
k_slice = np.round(k_slice)
threshold = -np.log10(0.1) # 10% corrected
vmax = min(signed_neg_log_pvals.max(),
           neg_log_pvals_bonferroni.max())
# Plot thresholded p-values map corresponding to F-scores
fig = plt.figure(figsize=(4, 5.5), facecolor='k')