How to use the nibabel.trackvis.read function in nibabel

To help you get started, we’ve selected a few nibabel examples based on popular ways the function is used in public projects.
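
As a quick orientation before the project snippets below, here is a minimal sketch of the call itself (the file name is a placeholder, not taken from any of the projects):

from nibabel import trackvis as tv

streams, hdr = tv.read('tracks.trk')   # placeholder .trk path
# `streams` is a list of (points, scalars, properties) tuples;
# `hdr` is a structured array with fields such as 'dim' and 'voxel_size'
streamlines = [s[0] for s in streams]
print(len(streamlines), hdr['voxel_size'])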


github nipy / dipy / doc / examples_0.5 / tractography_clustering.py
from dipy.io.pickles import save_pickle
from dipy.viz import fvtk
# also needed by this excerpt: the trackvis reader and track metrics
from nibabel import trackvis as tv
from dipy.tracking import metrics as tm


#fname='/home/user/Data_Backup/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'
#fname='/home/user/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'
from dipy.data import get_data

fname=get_data('fornix')
print(fname)

"""
Load Trackvis file for *Fornix*:
"""

streams,hdr=tv.read(fname)

"""
Copy tracks:
"""

T=[i[0] for i in streams]

"""
Downsample tracks to 12 points:
"""

tracks=[tm.downsample(t, 12) for t in T]

"""
Delete unnecessary data:
"""
github pelednoam / mmvt / src / preproc / dti.py
def load_tracula_trk(subject):
    tracks_fols = utils.get_subfolders(op.join(DTI_DIR, subject, 'dpath'))
    output_fol = op.join(BLENDER_ROOT_DIR, subject, 'dti', 'tracula')
    utils.make_dir(output_fol)
    for track_fol in tracks_fols:
        track_fol_name = os.path.basename(track_fol)
        print('Reading {}'.format(track_fol_name))
        track_gen, hdr = nib.trackvis.read(op.join(track_fol, 'path.pd.trk'), as_generator=True, points_space='rasmm')
        hdr = convert_header(hdr)
        vox2ras_trans = get_vox2ras_trans(subject)
        tracks = read_tracks(track_gen, hdr, vox2ras_trans)
        output_fname = op.join(output_fol, '{}.pkl'.format(track_fol_name))
        utils.save(tracks, output_fname)
        print('Save in {}'.format(output_fname))
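# Side note (sketch, not part of the mmvt code): with as_generator=True the
# streamlines come out lazily, so a large tractogram can be scanned without
# holding it all in memory; points_space='rasmm' returns RAS-mm coordinates.
track_gen, hdr = nib.trackvis.read('path.pd.trk', as_generator=True,  # file name as in the loop above
                                   points_space='rasmm')
n_points = sum(len(points) for points, scalars, props in track_gen)
print('total points:', n_points)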
github LTS5 / cmp / cmp / stages / connectionmatrix / creatematrix.py
def cmat(): 
    """ Create the connection matrix for each resolution using fibers and ROIs. """
              
    # create the endpoints for each fiber
    en_fname  = op.join(gconf.get_cmp_fibers(), 'endpoints.npy')
    en_fnamemm  = op.join(gconf.get_cmp_fibers(), 'endpointsmm.npy')
    ep_fname  = op.join(gconf.get_cmp_fibers(), 'lengths.npy')
    curv_fname  = op.join(gconf.get_cmp_fibers(), 'meancurvature.npy')
    intrk = op.join(gconf.get_cmp_fibers(), 'streamline_filtered.trk')

    fib, hdr    = nibabel.trackvis.read(intrk, False)
    
    # Previously, load_endpoints_from_trk() used the voxel size stored
    # in the track hdr to transform the endpoints to ROI voxel space.
    # This only works if the ROI voxel size is the same as the DSI/DTI
    # voxel size.  In the case of DTI, it is not.  
    # We do, however, assume that all of the ROI images have the same
    # voxel size, so this code just loads the first one to determine
    # what it should be
    firstROIFile = op.join(gconf.get_cmp_tracto_mask_tob0(), 
                           gconf.parcellation.keys()[0],
                           'ROIv_HR_th.nii.gz')
    firstROI = nibabel.load(firstROIFile)
    roiVoxelSize = firstROI.get_header().get_zooms()
    (endpoints,endpointsmm) = create_endpoints_array(fib, roiVoxelSize)
    np.save(en_fname, endpoints)
    np.save(en_fnamemm, endpointsmm)
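# (illustration only, not the cmp implementation) -- every element returned by
# nibabel.trackvis.read is a (points, scalars, properties) tuple, so the two
# endpoints of each fiber can be pulled straight from the points array:
endpoints_mm = np.array([(f[0][0], f[0][-1]) for f in fib])   # shape (n_fibers, 2, 3), trackvis voxel-mm space
endpoints_vox = endpoints_mm / np.asarray(roiVoxelSize)       # scale to ROI voxel indices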
github nipy / dipy / doc / examples / segment_quickbundles_advanced.py
The following examples show advanced usages of QuickBundles [Garyfallidis12]_
and the clustering framework. If you are not familiar with either of them, check
:ref:`example_segment_quickbundles` for an introduction to tractography
clustering with QuickBundles, or :ref:`clustering-framework` for a basic
overview of how the clustering framework works in Dipy.

First import the necessary modules and load a small streamline bundle.
"""

from nibabel import trackvis as tv
from dipy.segment.clustering import QuickBundles
from dipy.data import get_data

fname = get_data('fornix')
streams, hdr = tv.read(fname)
streamlines = [i[0] for i in streams]

"""
QuickBundles using the `ResampleFeature`
========================================
By default, the QuickBundles algorithm internally uses a representation of the
streamlines that is either downsampled or upsampled so that each streamline has
12 points. To make QuickBundles resample to a different number of points, use
the `ResampleFeature` feature.

Here we perform QuickBundles clustering using the MDF metric and a 10 mm
distance threshold on streamlines that are internally resampled to 24 points.
*Note: `ResampleFeature` performs the resampling on the fly, so no permanent
modifications are made to your streamlines.*
"""
github nipy / dipy / doc / examples / interact_data.py
img = nib.load(dname + 't1_brain_warp.nii.gz')
data = img.get_data()
affine = img.get_affine()


img_fa = nib.load(dname + 'results/metrics/fa.nii')
fa = img_fa.get_data()
affine_fa = img_fa.get_affine()


streams, hdr = tv.read(dname + 'results/bundles/cst.right.trk',
                       points_space="rasmm")
streamlines = [s[0] for s in streams]

streams, hdr = tv.read(dname + 'results/bundles/af.left.trk',
                       points_space="rasmm")
streamlines += [s[0] for s in streams]

streams, hdr = tv.read(dname + 'results/bundles/cc_1.trk',
                       points_space="rasmm")
streamlines += [s[0] for s in streams]

if not world_coords:
    from dipy.tracking.streamline import transform_streamlines
    streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))

renderer = window.renderer()

stream_actor = actor.streamtube(streamlines, fa)

if not world_coords:
github nipy / nipype / nipype / interfaces / dipy / tracks.py
def _run_interface(self, runtime):
        from numpy import min_scalar_type
        from dipy.tracking.utils import density_map

        tracks, header = nbt.read(self.inputs.in_file)
        streams = ((ii[0]) for ii in tracks)

        if isdefined(self.inputs.reference):
            refnii = nb.load(self.inputs.reference)
            affine = refnii.affine
            data_dims = refnii.shape[:3]
            kwargs = dict(affine=affine)
        else:
            IFLOGGER.warn('voxel_dims and data_dims are deprecated as of dipy '
                          '0.7.1. Please use reference input instead')

            if not isdefined(self.inputs.data_dims):
                data_dims = header['dim']
            else:
                data_dims = self.inputs.data_dims
            if not isdefined(self.inputs.voxel_dims):
github nipy / dipy / doc / examples / warp_streamlines.py
ffaw = 'tensor_fa_warped.nii.gz'

warp_displacements(ffa, fmat, fdis, fref, ffaw, order=1)

"""
Now we will warp the streamlines into MNI space using the previously created
displacements. For this purpose we will use the function
`warp_displacements_tracks`. However, this function expects its input in .dpy
format, so we first need to convert the streamlines from .trk to .dpy, which
we do here.
"""

from nibabel import trackvis

ftrk = 'tensor_streamlines.trk'

streams, hdr = trackvis.read(ftrk, points_space='voxel')
streamlines = [s[0] for s in streams]

from dipy.io.dpy import Dpy

fdpy = 'tensor_streamlines.dpy'

dpw = Dpy(fdpy, 'w')

"""
Write all streamlines at once.
"""

dpw.write_tracks(streamlines)

dpw.close()
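# Quick round-trip check (not part of the original example): the .dpy file can
# be read back with the same Dpy class.
dpr = Dpy(fdpy, 'r')
streamlines_back = dpr.read_tracks()   # reads every streamline at once
dpr.close()
print(len(streamlines_back))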
github nipy / dipy / scratch / very_scratch / tractography_clustering_new_fos.py
from fos.core.scene  import Scene
from fos.core.actors import Actor
from fos.core.plots  import Plot
from fos.core.tracks import Tracks
# also needed by this excerpt: timing, the trackvis reader, and track metrics
import time
from nibabel import trackvis as tv
from dipy.tracking import metrics as tm

#fname='/home/eg01/Data_Backup/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'

fname='/home/eg01/Data_Backup/Data/PBC/pbc2009icdm/brain2/brain2_scan1_fiber_track_mni.trk'


#fname='/home/eg309/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'

opacity=0.5

print('Loading file...')
streams, hdr = tv.read(fname)

print('Copying tracks...')
T = [i[0] for i in streams]

T = T[:len(T) // 5]  # keep only the first fifth of the tracks

#T=T[:1000]

print('Representing tracks using only 3 pts...')
tracks = [tm.downsample(t, 3) for t in T]

print('Deleting unnecessary data...')
del streams, hdr

print('Local Skeleton Clustering...')
now = time.perf_counter()  # time.clock() was removed in Python 3
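# The excerpt is cut off here; in the dipy examples of this era the clustering
# step presumably continues along these lines (threshold value illustrative):
from dipy.tracking import distances as td

C = td.local_skeleton_clustering(tracks, d_thr=5.)
print('Done in %.2f s' % (time.perf_counter() - now))
print('Found %d clusters' % len(C))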