How to use the nibabel.aff2axcodes function in nibabel

To help you get started, we’ve selected a few nibabel examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github nipy / dipy / dipy / io / utils.py View on Github external
is_nifti = True
    elif isinstance(reference, dict) and 'magic_number' in reference:
        header = reference
        is_trk = True
    elif isinstance(reference, dipy.io.stateful_tractogram.StatefulTractogram):
        is_sft = True

    if is_nifti:
        affine = header.get_best_affine()
        dimensions = header['dim'][1:4]
        voxel_sizes = header['pixdim'][1:4]

        if not affine[0:3, 0:3].any():
            raise ValueError('Invalid affine, contains only zeros.'
                             'Cannot determine voxel order from transformation')
        voxel_order = ''.join(nib.aff2axcodes(affine))
    elif is_trk:
        affine = header['voxel_to_rasmm']
        dimensions = header['dimensions']
        voxel_sizes = header['voxel_sizes']
        voxel_order = header['voxel_order']
    elif is_sft:
        affine, dimensions, voxel_sizes, voxel_order = reference.space_attributes
    else:
        raise TypeError('Input reference is not one of the supported format')

    if isinstance(voxel_order, np.bytes_):
        voxel_order = voxel_order.decode('utf-8')

    # Run this function to logging the warning from it
    is_reference_info_valid(affine, dimensions, voxel_sizes, voxel_order)
github photon-team / photon / Photon_Neuro / BrainAtlas.py View on Github external
self.gotData = True

        # get ROI infos
        rois = self._getROIs(whichROIs=whichROIs, background_id=background_id)

        # Grab masker and apply to structural data for each ROI
        from nilearn.input_data import NiftiMasker
        from nilearn import image

        try:
            img = load_img(X[0])
        except:
            img = X[0]

        import nibabel as nib
        orient_data = ''.join(nib.aff2axcodes(img.affine))

        roi_data = []
        if extract_mode == 'box':
            self.box_shape = []
        i = 0
        out_ind = ()
        for roi in rois:
            roi = image.resample_img(roi, target_affine=img.affine, target_shape=img.shape, interpolation='nearest')

            # check orientations
            orient_roi = ''.join(nib.aff2axcodes(roi.affine))
            orient_ok = orient_roi==orient_data
            if not orient_ok:
                print('Orientation of mask and data are not the same: ' + orient_roi + ' (mask) vs. ' + orient_data + ' (data)')
                break
github nighres / nighres / nighres / brain / mgdm_segmentation.py View on Github external
def _get_mgdm_orientation(affine, mgdm):
    '''
    Transforms nibabel affine information into
    orientation and slice order that MGDM understands
    '''
    orientation = nb.aff2axcodes(affine)
    # set mgdm slice order
    if orientation[-1] == "I" or orientation[-1] == "S":
        sliceorder = mgdm.AXIAL
    elif orientation[-1] == "L" or orientation[-1] == "R":
        sliceorder = mgdm.SAGITTAL
    else:
        sliceorder = mgdm.CORONAL

    # set mgdm orientations
    if "L" in orientation:
        LR = mgdm.R2L
    elif "R" in orientation:
        LR = mgdm.L2R  # flipLR = True
    if "A" in orientation:
        AP = mgdm.P2A  # flipAP = True
    elif "P" in orientation:
github photon-team / photon / photonai / neuro / brain_atlas.py View on Github external
def _resample(mask, target_affine, target_shape):
    """Resample *mask* onto the grid described by *target_affine* / *target_shape*.

    Resampling (nearest-neighbour) happens only when BOTH a target affine and
    a target shape are supplied; otherwise the mask is returned untouched.
    After resampling, the anatomical axis codes (e.g. 'RAS') of the mask and
    the target grid are compared and any mismatch is logged as an error.
    """
    # Guard clause: nothing to do without a fully specified target grid.
    if target_affine is None or target_shape is None:
        return mask

    resampled = image.resample_img(
        mask,
        target_affine=target_affine,
        target_shape=target_shape,
        interpolation='nearest',
    )

    # Compare axis-code triples derived from the two affines.
    data_orientation = ''.join(nib.aff2axcodes(target_affine))
    mask_orientation = ''.join(nib.aff2axcodes(resampled.affine))
    if mask_orientation != data_orientation:
        logger.error('Orientation of mask and data are not the same: '
                     + mask_orientation + ' (mask) vs. '
                     + data_orientation + ' (data)')
    return resampled
github poldracklab / niworkflows / niworkflows / interfaces / cifti.py View on Github external
('L', 'A', 'S')

    >>> _reorient_image(img, orientation='LPI')
    Traceback (most recent call last):
      ...
    NotImplementedError: Cannot reorient ...

    >>> _reorient_image(img)
    Traceback (most recent call last):
      ...
    RuntimeError: No orientation ...

    """
    orient0 = nb.aff2axcodes(img.affine)
    if target_img is not None:
        orient1 = nb.aff2axcodes(target_img.affine)
    elif orientation is not None:
        orient1 = tuple(orientation)
    else:
        raise RuntimeError("No orientation to reorient to!")

    if orient0 == orient1:  # already in desired orientation
        return img
    elif orient0 == tuple("RAS") and orient1 == tuple("LAS"):  # RAS -> LAS
        return img.as_reoriented([[0, -1], [1, 1], [2, 1]])
    else:
        raise NotImplementedError(
            "Cannot reorient {0} to {1}.".format(orient0, orient1)
        )
github mne-tools / mne-python / mne / viz / _3d.py View on Github external
ax = 'y'
            y = params['ax_x'].lines[0].get_xdata()[0]
            x, z = event.xdata, event.ydata
        elif event.inaxes is params['ax_z']:
            ax = 'z'
            x, y = event.xdata, event.ydata
            z = params['ax_x'].lines[1].get_ydata()[0]
        else:
            logger.debug('    Click outside axes')
            return None
        cut_coords = np.array((x, y, z))
        logger.debug('')

        if params['mode'] == 'glass_brain':  # find idx for MIP
            # Figure out what XYZ in world coordinates is in our voxel data
            codes = ''.join(nib.aff2axcodes(params['img_idx'].affine))
            assert len(codes) == 3
            # We don't care about directionality, just which is which dim
            codes = codes.replace('L', 'R').replace('P', 'A').replace('I', 'S')
            idx = codes.index(dict(x='R', y='A', z='S')[ax])
            img_data = np.abs(_get_img_fdata(params['img_idx']))
            ijk = _cut_coords_to_ijk(cut_coords, params['img_idx'])
            if idx == 0:
                ijk[0] = np.argmax(img_data[:, ijk[1], ijk[2]])
                logger.debug('    MIP: i = %d idx' % (ijk[0],))
            elif idx == 1:
                ijk[1] = np.argmax(img_data[ijk[0], :, ijk[2]])
                logger.debug('    MIP: j = %d idx' % (ijk[1],))
            else:
                ijk[2] = np.argmax(img_data[ijk[0], ijk[1], :])
                logger.debug('    MIP: k = %d idx' % (ijk[2],))
            cut_coords = _ijk_to_cut_coords(ijk, params['img_idx'])
github NifTK / NiftyNet / niftynet / utilities / subject.py View on Github external
def __reorient_to_original(self, data_5d):
    """Reorient *data_5d* from STANDARD_ORIENTATION back to the subject's
    original orientation, as derived from the stored affine.

    Returns None when there is no data or the array has an empty shape.
    """
    # Bug fix: the original used ``data_5d.shape is ()`` — an identity
    # comparison against a tuple literal only works because CPython happens
    # to intern the empty tuple, and it raises a SyntaxWarning on modern
    # Python.  Use equality instead.
    if data_5d is None or data_5d.shape == ():
        return None
    image_affine = self._read_original_affine()
    # Translate the affine into an orientation (ornt) array that the
    # reorientation utility understands.
    ornt_original = nib.orientations.axcodes2ornt(
        nib.aff2axcodes(image_affine))
    return util.do_reorientation(data_5d,
                                 STANDARD_ORIENTATION,
                                 ornt_original)
github photon-team / photon / photonai / neuro / brain_atlas.py View on Github external
def _resample(mask, target_affine, target_shape):
    """Nearest-neighbour resampling of *mask* to a target grid.

    A no-op unless both *target_affine* and *target_shape* are provided.
    Logs an error when the resampled mask and the target grid do not share
    the same anatomical axis ordering (axis codes such as 'RAS' or 'LPS').
    """
    if target_affine is not None and target_shape is not None:
        mask = image.resample_img(
            mask,
            target_affine=target_affine,
            target_shape=target_shape,
            interpolation='nearest',
        )
        # Derive axis-code strings for the target grid and the resampled mask.
        want = ''.join(nib.aff2axcodes(target_affine))
        got = ''.join(nib.aff2axcodes(mask.affine))
        if got != want:
            logger.error('Orientation of mask and data are not the same: '
                         f'{got} (mask) vs. {want} (data)')
    return mask