import glob

import nibabel as nb


def test_simulator(tmpdir):
    # Simulator comes from the package under test (its import is not shown in this excerpt).
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)
    # collect the simulated volumes written to the temporary directory
    # and stack them into a single 4D image
    flist = glob.glob(str(tmpdir.join('centered*nii.gz')))
    shape = (91, 109, 91)
    sim_img = nb.concat_images(flist)
    assert len(sim.data) == n_reps * len(y)
    assert sim_img.shape[0:3] == shape
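For orientation, here is a minimal, self-contained sketch of what nibabel.concat_images does on its own: given a sequence of 3D images (or filenames) with matching affines, it stacks them along a new trailing axis and returns a single 4D image. The toy volumes below are made up for illustration.

import numpy as np
import nibabel as nb

# three small synthetic 3D volumes sharing one affine
affine = np.eye(4)
vols = [nb.Nifti1Image(np.random.rand(4, 4, 4), affine) for _ in range(3)]

# with the default axis=None, a new trailing axis is added
img4d = nb.concat_images(vols)
print(img4d.shape)  # (4, 4, 4, 3)

By default check_affines=True, so images with mismatched affines raise an error; filenames can be passed in place of loaded images.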
# specify contrasts
contrasts = {}
n_columns = len(design_matrix.names)
for i in range(paradigm.n_conditions):
    contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]

# more interesting contrasts
contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled']

# fit GLM
print('Fitting a GLM (this takes time)...')
fmri_glm = FMRILinearModel(
    [nibabel.concat_images(x) for x in subject_data.func],
    [design_matrix.matrix for design_matrix in design_matrices],
    mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')

# save computed mask
mask_path = os.path.join(subject_data.output_dir, "mask.nii.gz")
print("Saving mask image %s" % mask_path)
nibabel.save(fmri_glm.mask, mask_path)
mask_images.append(mask_path)

# compute contrast maps
z_maps = {}
effects_maps = {}
for contrast_id, contrast_val in contrasts.items():
    print("\tcontrast id: %s" % contrast_id)
    z_map, t_map, effects_map, var_map = fmri_glm.contrast(
import os
import sys

import nibabel as ni

# load the data
slice_order = 'ascending'
interleaved = False
if dataset == 'spm-auditory':
    # pypreprocess path
    PYPREPROCESS_DIR = os.path.dirname(os.path.split(
        os.path.abspath(__file__))[0])
    sys.path.append(PYPREPROCESS_DIR)
    from datasets_extras import fetch_spm_auditory_data
    _subject_data = fetch_spm_auditory_data(data_dir)
    fmri_img = ni.concat_images(_subject_data['func'])
    fmri_data = fmri_img.get_data()[:, :, :, 0, :]
    compare_with = ni.concat_images(
        [os.path.join(os.path.dirname(x),
                      "a" + os.path.basename(x))
         for x in _subject_data['func']]).get_data()
    TR = 7.
elif dataset == 'fsl-feeds':
    PYPREPROCESS_DIR = os.path.dirname(os.path.split(
        os.path.abspath(__file__))[0])
    sys.path.append(PYPREPROCESS_DIR)
    from datasets_extras import fetch_fsl_feeds_data
    _subject_data = fetch_fsl_feeds_data(data_dir)
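The [:, :, :, 0, :] indexing above suggests each functional volume in this dataset carries a trailing singleton dimension, so concatenation produces a 5D array. A small illustrative sketch of that behavior with synthetic images (the shapes are made up):

import numpy as np
import nibabel as nb

affine = np.eye(4)
# volumes stored with a trailing singleton axis, as some SPM-era files are
vols = [nb.Nifti1Image(np.random.rand(8, 8, 8, 1), affine) for _ in range(5)]

img = nb.concat_images(vols)
print(img.shape)  # (8, 8, 8, 1, 5)

# drop the singleton axis to get an ordinary (x, y, z, t) array
data = img.get_fdata()[:, :, :, 0, :]
print(data.shape)  # (8, 8, 8, 5)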
# modify the header of each 3D vol according to the
# estimated motion (realignment params)
sess_rvols = apply_realignment(self.vols_[sess],
                               self.realignment_parameters_[sess],
                               inverse=False)

# reslice vols
if reslice:
    self._log('Reslicing volumes for session %i/%i...' % (
        sess + 1, self.n_sessions))
    sess_rvols = list(reslice_vols(sess_rvols))
    self._log('...done; session %i/%i.' % (
        sess + 1, self.n_sessions))

if concat_sess:
    sess_rvols = nibabel.concat_images(sess_rvols)

if output_dir is None:
    output['realigned_images'].append(sess_rvols)

# save output to disk
if output_dir is not None:
    # make basenames for output files
    sess_basenames = None
    if basenames is None:
        if isinstance(self.vols[sess], str):
            sess_basenames = get_basenames(self.vols[sess],
                                           ext=ext)
        elif isinstance(self.vols[sess], list):
            if isinstance(self.vols[sess][0], str):
                sess_basenames = get_basenames(self.vols[sess],
                                               ext=ext)
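When output_dir is given, the code above goes on to derive output filenames and write the realigned volumes. As a standalone illustration of that save step, here is a sketch using a hypothetical helper name and filename pattern, not the project's get_basenames machinery:

import os

import nibabel as nb


def save_session(vols, output_dir, prefix="r", concat=False):
    """Write realigned 3D volumes to output_dir, optionally as one 4D file."""
    os.makedirs(output_dir, exist_ok=True)
    if concat:
        out = os.path.join(output_dir, prefix + "session.nii.gz")
        nb.save(nb.concat_images(vols), out)
        return [out]
    paths = []
    for i, vol in enumerate(vols):
        out = os.path.join(output_dir, "%svol_%04i.nii.gz" % (prefix, i))
        nb.save(vol, out)
        paths.append(out)
    return paths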
    Smoothed image, same type and size as the input img.

    """
    if isinstance(img, str):
        img = ni.load(img)
    elif isinstance(img, tuple):
        assert len(img) == 2
        return smooth_image(ni.Nifti1Image(img[0], img[1]), fwhm, **kwargs)
    elif isinstance(img, list):
        return [smooth_image(x, fwhm, **kwargs) for x in img]
    else:
        assert is_niimg(img)

    if len(img.shape) == 4:
        # smooth a 4D series volume-by-volume, then re-stack into 4D
        return ni.concat_images(
            [smooth_image(vol, fwhm, **kwargs)
             for vol in ni.four_to_three(img)])
    else:
        assert len(img.shape) == 3
        smoothing_kernel = LinearFilter(
            img.get_affine(),
            img.shape,
            fwhm=fwhm,
            **kwargs)
        return ni.Nifti1Image(smoothing_kernel.smooth(img.get_data(),
                                                      clean=True),
                              img.get_affine())
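A usage sketch for the smooth_image helper above. It is not self-contained: it assumes the function's full definition, its ni/LinearFilter imports from nibabel and nipy, and a leading (img, fwhm, **kwargs) signature, as the recursive calls suggest. For a 4D input the function splits the series with four_to_three, smooths each volume, and re-stacks the results with concat_images:

import numpy as np
import nibabel as ni

# a synthetic series of 10 volumes, shape (16, 16, 16) each
img4d = ni.Nifti1Image(np.random.rand(16, 16, 16, 10), np.eye(4))

# isotropic 6 mm FWHM smoothing, applied volume-by-volume
smoothed = smooth_image(img4d, 6.)
print(smoothed.shape)  # (16, 16, 16, 10)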
def estimate_motion(nifti_image):
    # BEGIN STDOUT SUPPRESSION
    actualstdout = sys.stdout
    sys.stdout = open(os.devnull, 'w')
    # We want to use the middle time point as the reference, but the
    # algorithm doesn't allow that, so fake it.
    ref_vol = nifti_image.shape[3] // 2 + 1
    ims = nb.four_to_three(nifti_image)
    reg = Realign4d(nb.concat_images([ims[ref_vol]] + ims), tr=1.)  # in the next release, we'll need to add tr=1.
    reg.estimate(loops=3)  # default: loops=5
    aligned = reg.resample(0)[:, :, :, 1:]
    sys.stdout = actualstdout
    # END STDOUT SUPPRESSION
    abs_disp = []
    rel_disp = []
    transrot = []
    prev_T = None
    # skip the first one, since it's the reference volume
    for T in reg._transforms[0][1:]:
        # get the full affine for this volume by pre-multiplying by the
        # reference affine:
        # mc_affine = np.dot(ni.get_affine(), T.as_affine())
        transrot.append(T.translation.tolist() + T.rotation.tolist())
        # Compute the mean displacement
        # See http://www.fmrib.ox.ac.uk/analysis/techrep/tr99mj1/tr99mj1/node5.html
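The loop above only collects the six rigid-body parameters; the linked FMRIB note describes turning them into a displacement measure. One common simplification (not necessarily what this code did) is Power-style framewise displacement: sum the absolute frame-to-frame parameter changes, converting rotations to arc length on a sphere of fixed radius. A sketch, assuming the rotations stored in transrot are in radians:

import numpy as np


def framewise_displacement(transrot, radius=50.0):
    """Simplified FD: sum of absolute frame-to-frame changes in the six
    parameters, with rotations (radians) converted to millimetres on a
    sphere of the given radius."""
    params = np.asarray(transrot, dtype=float)  # shape (n_vols, 6): tx ty tz rx ry rz
    diffs = np.abs(np.diff(params, axis=0))
    diffs[:, 3:] *= radius  # arc-length approximation for the rotations
    return diffs.sum(axis=1)  # one value per transition between volumes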
def _gen_fmri_data(self, df, weights, roi_coords, runno, blocklen):
    nmeasr = len(df)
    # make a time series of the slices
    nim = nb.concat_images(['slice_red.nii'] * nmeasr)
    data = nim.get_data()
    ori = nb.io_orientation(nim.get_affine())
    data = nb.apply_orientation(data, ori)
    # make an empty ROI
    coords = roi_coords + (np.s_[0:len(df)],)
    roi = np.zeros([c.stop - c.start for c in coords])
    # compute response in a single voxel
    resp = self.hrfs(weights, blocklen)
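The coords tuple above combines the spatial ROI slices with a time slice built via np.s_; the same tuple can also index the 4D data directly. A standalone illustration with made-up coordinates:

import numpy as np

data = np.zeros((10, 10, 10, 20))            # x, y, z, time
roi_coords = (np.s_[2:5], np.s_[3:6], np.s_[4:7])
coords = roi_coords + (np.s_[0:20],)          # add the time axis

# an empty array with the ROI's shape, as in the snippet above
roi = np.zeros([c.stop - c.start for c in coords])
print(roi.shape)            # (3, 3, 3, 20)

# the same tuple indexes the 4D data to extract the ROI time series
print(data[coords].shape)   # (3, 3, 3, 20)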
# scatter each chunk of masked voxel values back into the full-size
# per-direction volumes
for cname, iname in zip(in_files, in_idxs):
    f = np.load(iname)
    idxs = np.squeeze(f['arr_0'])
    for d, fname in enumerate(nii):
        data = nb.load(fname, mmap=NUMPY_MMAP).get_data().reshape(-1)
        cdata = nb.load(
            cname, mmap=NUMPY_MMAP).get_data().reshape(-1, ndirs)[:, d]
        nels = len(idxs)
        idata = (idxs, )
        data[idata] = cdata[0:nels]
        nb.Nifti1Image(data.reshape(rsh[:3]), aff,
                       hdr).to_filename(fname)

# stack the per-direction volumes into a single 4D image and write it out
imgs = [nb.load(im, mmap=NUMPY_MMAP) for im in nii]
allim = nb.concat_images(imgs)
allim.to_filename(out_file)
return out_file
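The 'arr_0' key read above is simply the name numpy.savez assigns to arrays passed positionally. A short sketch of writing and reading such an index file (the filename is hypothetical):

import numpy as np

# indices of the in-mask voxels, saved positionally -> stored under 'arr_0'
idxs = np.flatnonzero(np.random.rand(1000) > 0.5)
np.savez('idxs_chunk0.npz', idxs)

f = np.load('idxs_chunk0.npz')
print(np.squeeze(f['arr_0']).shape)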
def _get_timeseries(data, row_mask, affine=None):
    if isinstance(data, list):
        return nb.concat_images(np.array(data)[row_mask])
    elif isinstance(data, str):
        img = nb.load(data)
        return nb.Nifti1Image(img.get_data()[row_mask, :], img.get_affine())
    elif isinstance(data, (np.ndarray, np.memmap)):
        if affine is None:
            raise Exception('The affine is not optional '
                            'when data is an array')
        return nb.Nifti1Image(data[row_mask, :], affine)
    else:
        raise ValueError('Data type "%s" not supported' % type(data))
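A brief usage sketch for _get_timeseries as defined above, exercising the array branch (the list and filename branches need images on disk). It assumes numpy and nibabel are imported as np/nb, and the shapes are made up:

import numpy as np

# data laid out as (n_voxels, n_timepoints), plus a boolean row mask
data = np.random.rand(500, 120)
row_mask = data.mean(axis=1) > 0.5
affine = np.eye(4)

ts_img = _get_timeseries(data, row_mask, affine=affine)
print(ts_img.shape)  # (n_selected_voxels, 120)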