def mock_chunk_read_(response, local_file, initial_size=0, chunk_size=8192,
report_hook=None, verbose=0):
if not isinstance(response, _basestring):
return _chunk_read_(response, local_file,
initial_size=initial_size,
chunk_size=chunk_size,
report_hook=report_hook, verbose=verbose)
return response
return mock_chunk_read_
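This closure is presumably returned by a test factory whose def line is cut off above; a hedged sketch of how such a factory would typically be used, where both the factory name make_mock_chunk_read and the patch target are assumptions, not taken from the snippet:
from unittest import mock

# Patch the real downloader so pre-seeded string "responses" pass through
# untouched while file-like responses still stream via _chunk_read_.
# (Factory name and module path are hypothetical.)
with mock.patch('nilearn.datasets.utils._chunk_read_',
                new=make_mock_chunk_read()):
    pass  # run the dataset fetcher under test here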
def fit(self, imgs, y=None, confounds=None):
"""Compute the mask and the components
Parameters
----------
imgs: list of Niimg-like objects
See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
Data on which the PCA must be calculated. If this is a list,
the affine is considered the same for all.
"""
# Hack to support single-subject data:
if isinstance(imgs, (_basestring, nibabel.Nifti1Image)):
imgs = [imgs]
# This is a very incomplete hack, as it won't work right for a
# single-subject list of 3D filenames
if len(imgs) == 0:
# Common error that arises from a null glob. Capture
# it early and raise a helpful message
raise ValueError('Need one or more Niimg-like objects as input, '
'an empty list was given.')
if confounds is None:
confounds = itertools.repeat(None, len(imgs))
# First, learn the mask
if not isinstance(self.mask, (NiftiMasker, MultiNiftiMasker)):
self.masker_ = MultiNiftiMasker(mask_img=self.mask,
smoothing_fwhm=self.smoothing_fwhm,
target_affine=self.target_affine,
"""
if not isinstance(imgs, (list, tuple)) or \
isinstance(imgs, _basestring):
imgs = [imgs, ]
single_subject = True
elif isinstance(imgs, (list, tuple)) and len(imgs) == 1:
single_subject = True
else:
single_subject = False
if confounds is None and isinstance(imgs, (list, tuple)):
confounds = [None] * len(imgs)
if confounds is not None:
if not isinstance(confounds, (list, tuple)) or \
isinstance(confounds, _basestring):
confounds = [confounds, ]
if len(confounds) != len(imgs):
raise ValueError("Number of confounds given does not match with "
"the given number of images.")
return imgs, confounds, single_subject
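Since the function's name and docstring are cut off above, here is a minimal standalone sketch of the same normalization, under a hypothetical name, with the expected result for a single filename:
def _normalize_imgs_confounds(imgs, confounds=None):
    # Wrap a single image (string or image object) in a list and flag it.
    single_subject = False
    if isinstance(imgs, str) or not isinstance(imgs, (list, tuple)):
        imgs = [imgs]
        single_subject = True
    elif len(imgs) == 1:
        single_subject = True
    if confounds is None:
        confounds = [None] * len(imgs)
    else:
        if isinstance(confounds, str) or not isinstance(confounds, (list, tuple)):
            confounds = [confounds]
        if len(confounds) != len(imgs):
            raise ValueError("Number of confounds given does not match the "
                             "number of images given.")
    return imgs, confounds, single_subject

# e.g. _normalize_imgs_confounds("func.nii.gz") -> (["func.nii.gz"], [None], True)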
for sess in range(n_sessions):
subject_data.func[sess] = mem.cache(reslice_vols)(
subject_data.func[sess])
if write_output_images == 2:
_func.append(mem.cache(save_vols)(
subject_data.func[sess],
output_dir=subject_data.output_dir,
basenames=func_basenames[sess], prefix=func_prefix,
concat=concat))
if write_output_images == 2:
subject_data.func = _func
elif write_output_images == 1:
# write final output images
print("Saving preprocessed images unto disk...")
func_basenames = func_basenames[0] if (not isinstance(
func_basenames, _basestring) and concat) else func_basenames
_func = []
for sess in range(n_sessions):
if reslice:
subject_data.func[sess] = reslice_vols(subject_data.func[sess])
_func.append(mem.cache(save_vols)(
subject_data.func[sess], output_dir=subject_data.output_dir,
basenames=func_basenames[sess], prefix=func_prefix,
concat=concat))
subject_data.func = _func
# finalize
subject_data.finalize_report(last_stage=shutdown_reloaders)
if write_output_images:
subject_data.hardlink_output_files(final=True)
return subject_data.__dict__ if dict_input else subject_data
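The branches above suggest write_output_images acts as a three-valued switch; the enum below only records that inferred convention for readability (it is not part of the upstream API):
from enum import IntEnum

class WriteOutputImages(IntEnum):
    # Inferred from the branches above, not from upstream documentation.
    NONE = 0         # do not write preprocessed images to disk
    FINAL_ONLY = 1   # write only the final preprocessed images
    EVERY_STAGE = 2  # write (and cache) images after each processing stage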
[1] Papadopoulos Orfanos, Dimitri, et al.
"The Brainomics/Localizer database."
NeuroImage 144.B (2017): 309.
[2] Pinel, Philippe, et al.
"Fast reproducible identification and large-scale databasing of
individual functional cognitive networks."
BMC Neuroscience 8.1 (2007): 91.
See Also
--------
nilearn.datasets.fetch_localizer_calculation_task
nilearn.datasets.fetch_localizer_button_task
"""
if isinstance(contrasts, _basestring):
raise ValueError('Contrasts should be a list of strings, but '
'a single string was given: "%s"' % contrasts)
if n_subjects is None:
n_subjects = 94 # 94 subjects available
if (isinstance(n_subjects, numbers.Number) and
((n_subjects > 94) or (n_subjects < 1))):
warnings.warn("Wrong value for \'n_subjects\' (%d). The maximum "
"value will be used instead (\'n_subjects=94\')")
n_subjects = 94 # 94 subjects available
# we allow the user to use alternatives to Brainomics contrast names
contrast_name_wrapper = {
# Checkerboard
"checkerboard": "checkerboard",
"horizontal checkerboard": "horizontal checkerboard",
"vertical checkerboard": "vertical checkerboard",
Parameters
----------
metadata : dict
Metadata to transform
Returns
-------
metadata : dict
Original metadata in which strings representing null values
have been replaced by ``None``.
"""
metadata = metadata.copy()
for key, value in metadata.items():
if (isinstance(value, _basestring) and
re.match(r'($|n/?a$|none|null)', value, re.IGNORECASE)):
metadata[key] = None
return metadata
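For reference, a self-contained version of the same replacement (standard library only; the helper name here is hypothetical) and what it produces on a small metadata dict:
import re

def _strings_to_none(metadata):
    # Replace strings that denote missing values ("", "n/a", "na",
    # "none", "null", any case) with None.
    metadata = metadata.copy()
    for key, value in metadata.items():
        if (isinstance(value, str) and
                re.match(r'($|n/?a$|none|null)', value, re.IGNORECASE)):
            metadata[key] = None
    return metadata

print(_strings_to_none({"age": "n/a", "sex": "F", "handedness": "NULL"}))
# {'age': None, 'sex': 'F', 'handedness': None}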
def _repr_niimgs(niimgs):
""" Pretty printing of niimg or niimgs.
"""
if isinstance(niimgs, _basestring):
return niimgs
if isinstance(niimgs, collections.abc.Iterable):
return '[%s]' % ', '.join(_repr_niimgs(niimg) for niimg in niimgs)
# Nibabel objects have a 'get_filename'
try:
filename = niimgs.get_filename()
if filename is not None:
return "%s('%s')" % (niimgs.__class__.__name__,
filename)
else:
return "%s(\nshape=%s,\naffine=%s\n)" % \
(niimgs.__class__.__name__,
repr(niimgs.shape),
repr(niimgs.affine))
except Exception:
pass
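Expected behaviour of _repr_niimgs, assuming _basestring is str on Python 3 and numpy/nibabel are available (doctest-style, output abbreviated):
# >>> _repr_niimgs('anat.nii.gz')
# 'anat.nii.gz'
# >>> _repr_niimgs(['a.nii', 'b.nii'])
# '[a.nii, b.nii]'
# >>> import numpy as np, nibabel
# >>> _repr_niimgs(nibabel.Nifti1Image(np.zeros((2, 2, 2)), np.eye(4)))
# "Nifti1Image(\nshape=(2, 2, 2),\naffine=array([[1., 0., 0., 0.], ..."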
confounds : list of CSV file paths or 2D matrices
This parameter is passed to nilearn.signal.clean. Please see the
related documentation for details. It should match the list of
imgs given.
Returns
-------
self : object
Returns the instance itself. Contains attributes listed
at the object level.
"""
# Base fit for decomposition estimators: compute the embedded masker
if isinstance(imgs, _basestring):
if nilearn.EXPAND_PATH_WILDCARDS and glob.has_magic(imgs):
imgs = _resolve_globbing(imgs)
if isinstance(imgs, _basestring) or not hasattr(imgs, '__iter__'):
# these classes are meant for list of 4D images
# (multi-subject), we want it to work also on a single
# subject, so we hack it.
imgs = [imgs, ]
if len(imgs) == 0:
# Common error that arises from a null glob. Capture
# it early and raise a helpful message
raise ValueError('Need one or more Niimg-like objects as input, '
'an empty list was given.')
self.masker_ = check_embedded_nifti_masker(self)
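The same input normalization can be shown standalone; the helper name below is hypothetical and plain glob expansion stands in for nilearn's _resolve_globbing:
import glob

def _as_img_list(imgs):
    # Expand a glob pattern into filenames, then wrap a lone image in a list.
    if isinstance(imgs, str) and glob.has_magic(imgs):
        imgs = sorted(glob.glob(imgs))
    if isinstance(imgs, str) or not hasattr(imgs, '__iter__'):
        imgs = [imgs]
    if len(imgs) == 0:
        raise ValueError('Need one or more Niimg-like objects as input, '
                         'an empty list was given.')
    return imgs

# e.g. _as_img_list('subject_01.nii.gz') -> ['subject_01.nii.gz']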
See Also
--------
nilearn.image.largest_connected_component_img : To apply the same
    operation directly to Nifti images.
Notes
-----
**Handling big-endian in given numpy.ndarray**
This function changes the byte-ordering information to the native byte
order if the given volume has a non-native data type. The operation is
done in place to avoid big-endian issues with the scipy ndimage module.
"""
if hasattr(volume, "get_data") \
or isinstance(volume, _basestring):
raise ValueError('Please enter a valid numpy array. For images use\
largest_connected_component_img')
# Get the new byteorder to handle issues like "Big-endian buffer not
# supported on little-endian compiler" with scipy ndimage label.
if not volume.dtype.isnative:
volume.dtype = volume.dtype.newbyteorder('N')
# We use asarray to be able to work with masked arrays.
volume = np.asarray(volume)
labels, label_nb = ndimage.label(volume)
if not label_nb:
raise ValueError('No non-zero values: no connected components')
if label_nb == 1:
return volume.astype(bool)
label_count = np.bincount(labels.ravel().astype(int))
# discard the 0 label
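# Likely completion (a sketch; the original snippet is truncated here):
# ignore the background label, then keep only the largest component.
label_count[0] = 0
return labels == label_count.argmax()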
'tmaps': string list (if 'get_tmaps' is set to True)
Paths to nifti t maps
'masks': string list
Paths to nifti files corresponding to the subjects' individual masks
'anats': string
Path to nifti files corresponding to the subjects' structural images
References
----------
[1] Pinel, Philippe, et al.
"Fast reproducible identification and large-scale databasing of
individual functional cognitive networks."
BMC Neuroscience 8.1 (2007): 91.
"""
if isinstance(contrasts, _basestring):
raise ValueError('Contrasts should be a list of strings, but '
'a single string was given: "%s"' % contrasts)
if n_subjects is None:
n_subjects = 94 # 94 subjects available
if (n_subjects > 94) or (n_subjects < 1):
warnings.warn("Wrong value for \'n_subjects\' (%d). The maximum "
"value will be used instead (\'n_subjects=94\')")
n_subjects = 94 # 94 subjects available
# we allow the user to use alternatives to Brainomics contrast names
contrast_name_wrapper = {
# Checkerboard
"checkerboard": "checkerboard",
"horizontal checkerboard": "horizontal checkerboard",
"vertical checkerboard": "vertical checkerboard",
"horizontal vs vertical checkerboard":