# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def apply_all_corrections(datasink_directory, name='UnwarpArtifacts'):
"""
Combines two lists of linear transforms with the deformation field
map obtained from the EPI distortion correction computed by ANTs.
Additionally, computes the corresponding bspline coefficients and
the map of determinants of the jacobian.

NOTE(review): in this chunk the body's indentation appears stripped and
the function is truncated mid-statement at the ``coeffs`` node — confirm
against the full file.
"""
# Workflow inputs: per-volume affines (in_hmc / in_ecc — presumably
# head-motion and eddy-current corrections; verify against callers),
# the EPI fieldmap warp, the DWI series, and a T1 reference.
inputnode = pe.Node(niu.IdentityInterface(
fields=['in_epi', 'in_hmc','in_ecc', 'in_dwi', 'T1']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_file', 'out_warp', 'out_coeff', 'out_jacobian']),
name='outputnode')
# Split the 4D DWI along time so transforms can be applied per volume.
split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
# Concatenate each volume's two affines into a single transform.
concat_hmc_ecc = pe.MapNode(fsl.ConvertXFM(), name="concat_hmc_ecc", iterfield=['in_file', 'in_file2'])
concat_hmc_ecc.inputs.concat_xfm = True
# Fold each combined affine (premat) into the nonlinear warp so a single
# resampling step applies all corrections at once.
warps = pe.MapNode(fsl.ConvertWarp(), iterfield=['premat'], name='ConvertWarp')
unwarp = pe.MapNode(interface=fsl.ApplyWarp(), iterfield=['in_file', 'field_file'],name='unwarp_warp')
unwarp.inputs.interp = 'spline'
# Truncated here in this chunk: bspline coefficient extraction.
coeffs = pe.MapNode(fsl.WarpUtils(out_format='spline'),
# DataGrabber template arguments: per-subject contrast images and the
# bbregister transform from the first session.
# NOTE(review): `datasource`, `nameOfFirstSession`, `subjects_dir`,
# `ROIregionsorig`, `ROIregions2009`, `experiment_dir` and `aROIOutput`
# are defined outside this chunk — confirm in the full file.
info = dict(contrast = [['vol_contrasts','subject_id','con_00','contrast_id','.img']],
bb_id = [['bbregister','subject_id','meana'+nameOfFirstSession+'_bbreg_',
'subject_id','.dat']])
datasource.inputs.template_args = info
#Node: FreeSurferSource - to grab FreeSurfer files from the recon-all process
fssource = pe.Node(interface=nio.FreeSurferSource(),name='fssource')
fssource.inputs.subjects_dir = subjects_dir
#Node: MRIConvert - to convert files from FreeSurfer format into nifti format
MRIconversion = pe.Node(interface=fs.MRIConvert(),name='MRIconversion')
MRIconversion.inputs.out_type = 'nii'
#Node: ApplyVolTransform - to transform contrasts into anatomical space
# creates 'con_*.anat.bb.mgh' files
transformation = pe.Node(interface=fs.ApplyVolTransform(),name='transformation')
transformation.inputs.fs_target = True
# Nearest-neighbour interpolation so values are not blended across voxels.
transformation.inputs.interp = 'nearest'
#Node: SegStatsorig - to extract specified regions of the original segmentation
segmentationorig = pe.Node(interface=fs.SegStats(),name='segmentationorig')
segmentationorig.inputs.segment_id = ROIregionsorig
#Node: SegStats2009 - to extract specified regions of the 2009 segmentation
segmentation2009 = pe.Node(interface=fs.SegStats(),name='segmentation2009')
segmentation2009.inputs.segment_id = ROIregions2009
#Node: Datasink - Creates a datasink node to store important outputs
datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = experiment_dir + '/results'
datasink.inputs.container = aROIOutput
# NOTE(review): the two lines below are the tail of a node definition whose
# opening call is outside this chunk.
fields=['out_file', 'out_mask', 'bias_corrected', 'bias_image']),
name='copy_xform', run_without_submitting=True)
# Clip extreme intensities (0.01–0.999 quantiles, 256 bins) per input image.
trunc = pe.MapNode(ImageMath(operation='TruncateImageIntensity', op2='0.01 0.999 256'),
name='truncate_images', iterfield=['op1'])
# N4 bias-field correction of each input image; the bias field itself is
# discarded here (save_bias=False) and the header is preserved.
inu_n4 = pe.MapNode(
N4BiasFieldCorrection(
dimension=3, save_bias=False, copy_header=True,
n_iterations=[50] * 4, convergence_threshold=1e-7, shrink_factor=4,
bspline_fitting_distance=bspline_fitting_distance),
n_procs=omp_nthreads, name='inu_n4', iterfield=['input_image'])
# Downsample template and target to 4 mm isotropic (with smoothing) for the
# inexpensive initial affine search below.
res_tmpl = pe.Node(ResampleImageBySpacing(
out_spacing=(4, 4, 4), apply_smoothing=True), name='res_tmpl')
res_tmpl.inputs.input_image = tpl_target_path
res_target = pe.Node(ResampleImageBySpacing(
out_spacing=(4, 4, 4), apply_smoothing=True), name='res_target')
# Laplacian channels of template and target (sigma 1.5, normalized).
lap_tmpl = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
name='lap_tmpl')
lap_tmpl.inputs.op1 = tpl_target_path
lap_target = pe.Node(ImageMath(operation='Laplacian', op2='1.5 1'),
name='lap_target')
# Merge intensity + Laplacian channels for multivariate registration.
mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl')
mrg_tmpl.inputs.in1 = tpl_target_path
mrg_target = pe.Node(niu.Merge(2), name='mrg_target')
# Initialize transforms with antsAI
# NOTE(review): this AI node is truncated mid-call in this chunk.
init_aff = pe.Node(AI(
metric=('Mattes', 32, 'Regular', 0.25),
transform=('Affine', 0.1),
search_factor=(15, 0.1),
# PCA summary branch: standardize each voxel's time series (divide by its
# non-zero temporal std-dev) before extracting principal components.
if 'PC' in summary_method:
std_node = pe.Node(
afni.TStat(args='-nzstdev', outputtype='NIFTI'),
name='{}_std'.format(regressor_type)
)
nuisance_wf.connect(
summary_method_input[0], summary_method_input[1],
std_node, 'in_file'
)
nuisance_wf.connect(
union_masks_paths, 'out_file',
std_node, 'mask'
)
# Voxel-wise division a/b yields the standardized series.
# NOTE(review): "standarized" is the existing spelling of the node name;
# renaming it would change workflow/caching paths, so it is kept.
standarized_node = pe.Node(
afni.Calc(expr='a/b', outputtype='NIFTI'),
name='{}_standarized'.format(regressor_type)
)
nuisance_wf.connect(
summary_method_input[0], summary_method_input[1],
standarized_node, 'in_file_a'
)
nuisance_wf.connect(
std_node, 'out_file',
standarized_node, 'in_file_b'
)
# Extract the configured number of principal components with AFNI 3dpc.
pc_node = pe.Node(
PC(args='-vmean -nscale', pcs=regressor_selector['summary']['components'], outputtype='NIFTI_GZ'),
name='{}_pc'.format(regressor_type)
)
# Workflow computing AFNI-based centrality maps.
centrality_wf = pe.Workflow(name=wf_name)
# Create inputspec node
input_node = pe.Node(util.IdentityInterface(fields=['in_file',
'template',
'threshold']),
name='inputspec')
# Input threshold
input_node.inputs.threshold = threshold
# Define main input/function node
# Dispatch on the requested centrality method; every branch caps AFNI's
# thread usage via the OMP_NUM_THREADS environment variable.
# Degree centrality
if method_option == 'degree':
afni_centrality_node = \
pe.Node(DegreeCentrality(environ={'OMP_NUM_THREADS' : str(num_threads)}),
name='afni_centrality', mem_gb=memory_gb)
afni_centrality_node.inputs.out_file = 'degree_centrality_merged.nii.gz'
out_names = ('degree_centrality_binarize', 'degree_centrality_weighted')
# Eigenvector centrality
elif method_option == 'eigenvector':
afni_centrality_node = \
pe.Node(ECM(environ={'OMP_NUM_THREADS' : str(num_threads)}),
name='afni_centrality', mem_gb=memory_gb)
afni_centrality_node.inputs.out_file = 'eigenvector_centrality_merged.nii.gz'
afni_centrality_node.inputs.memory = memory_gb # 3dECM input only
out_names = ('eigenvector_centrality_binarize',
'eigenvector_centrality_weighted')
# lFCD
# NOTE(review): this branch is truncated mid-call in this chunk.
elif method_option == 'lfcd':
afni_centrality_node = \
pe.Node(LFCD(environ={'OMP_NUM_THREADS' : str(num_threads)}),
# Keep only the largest connected component of the WM and GM classes,
# discarding disconnected misclassified voxels.
get_wm = pe.Node(ImageMath(operation='GetLargestComponent'),
name='05_get_wm')
get_gm = pe.Node(ImageMath(operation='GetLargestComponent'),
name='06_get_gm')
# Fill holes and calculate intersection
# ImageMath ${DIMENSION} ${EXTRACTION_TMP} FillHoles ${EXTRACTION_GM} 2
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${EXTRACTION_TMP} ${EXTRACTION_GM}
fill_gm = pe.Node(ImageMath(operation='FillHoles', op2='2'),
name='07_fill_gm')
mult_gm = pe.Node(MultiplyImages(
dimension=3, output_product_image='08_mult_gm.nii.gz'), name='08_mult_gm')
# MultiplyImages ${DIMENSION} ${EXTRACTION_WM} ${ATROPOS_WM_CLASS_LABEL} ${EXTRACTION_WM}
# ImageMath ${DIMENSION} ${EXTRACTION_TMP} ME ${EXTRACTION_CSF} 10
# Relabel WM with the last class label of the segmentation model, and
# morphologically erode (ME, radius 10) the CSF class.
relabel_wm = pe.Node(MultiplyImages(
dimension=3, second_input=in_segmentation_model[-1],
output_product_image='09_relabel_wm.nii.gz'), name='09_relabel_wm')
me_csf = pe.Node(ImageMath(operation='ME', op2='10'), name='10_me_csf')
# ImageMath ${DIMENSION} ${EXTRACTION_GM} addtozero ${EXTRACTION_GM} ${EXTRACTION_TMP}
# MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${ATROPOS_GM_CLASS_LABEL} ${EXTRACTION_GM}
# ImageMath ${DIMENSION} ${EXTRACTION_SEGMENTATION} addtozero ${EXTRACTION_WM} ${EXTRACTION_GM}
add_gm = pe.Node(ImageMath(operation='addtozero'),
name='11_add_gm')
relabel_gm = pe.Node(MultiplyImages(
dimension=3, second_input=in_segmentation_model[-2],
output_product_image='12_relabel_gm.nii.gz'), name='12_relabel_gm')
# Combine relabelled GM and WM into the final segmentation image.
add_gm_wm = pe.Node(ImageMath(operation='addtozero'),
name='13_add_gm_wm')
# Superstep 7
Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview,
using the following two nodes.
"""
vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")
procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
"""
We can easily produce a variety of scalar values from our fitted tensors. The following nodes generate the
fractional anisotropy and diffusivity trace maps and their associated headers, and then merge them back
into a single .nii file.
"""
fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa')
trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')
# AnalyzeHeader nodes create headers so the raw Camino output can be read
# as Analyze images; 'double' matches the datatype Camino emits here.
analyzeheader_fa = pe.Node(interface=camino.AnalyzeHeader(), name='analyzeheader_fa')
analyzeheader_fa.inputs.datatype = 'double'
analyzeheader_trace = pe.Node(interface=camino.AnalyzeHeader(), name='analyzeheader_trace')
analyzeheader_trace.inputs.datatype = 'double'
# Merge scalar map + header into a NIfTI; trace2nii is a clone of fa2nii.
fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")
"""
This section adds the Connectome Mapping Toolkit (CMTK) nodes.
These interfaces are fairly experimental and may not function properly.
In order to perform connectivity mapping using CMTK, the parcellated structural data is rewritten
using the indices and parcellation scheme from the connectome mapper (CMP). This process has been
written into the ROIGen interface, which will output a remapped aparc+aseg image as well as a
dictionary of label information (i.e. name, display colours) pertaining to the original and remapped regions.
output_names=['local_path'],
function=check_for_s3,
as_module=True),
name='check_for_s3')
wf.connect(selectrest, 'file_path', check_s3_node, 'file_path')
wf.connect(inputnode, 'creds_path', check_s3_node, 'creds_path')
wf.connect(inputnode, 'dl_dir', check_s3_node, 'dl_dir')
check_s3_node.inputs.img_type = 'func'
wf.connect(inputnode, 'subject', outputnode, 'subject')
wf.connect(check_s3_node, 'local_path', outputnode, 'rest')
wf.connect(inputnode, 'scan', outputnode, 'scan')
# scan parameters CSV
select_scan_params = pe.Node(function.Function(input_names=['scan',
'rest_dict',
'resource'],
output_names=['file_path'],
function=get_rest,
as_module=True),
name='select_scan_params')
select_scan_params.inputs.rest_dict = rest_dict
select_scan_params.inputs.resource = "scan_parameters"
wf.connect(inputnode, 'scan', select_scan_params, 'scan')
# if the scan parameters file is on AWS S3, download it
s3_scan_params = pe.Node(function.Function(input_names=['file_path',
'creds_path',
'dl_dir',
'img_type'],
output_names=['local_path'],
"""
Performs a second (major) intensity correction using only the brain volume a
s the input (so that it has to be done after the skull strip). Intensity
normalization works better when the skull has been removed. Creates a new
brain.mgz volume. The -autorecon2-cp stage begins here.
"""
normalization2 = pe.Node(Normalize(), name="Normalization2")
normalization2.inputs.out_file = 'brain.mgz'
ar2_wf.connect([(copy_cc, normalization2, [('out_file', 'segmentation')]),
(inputspec, normalization2, [('brainmask', 'mask')]),
(ca_normalize, normalization2, [('out_file', 'in_file')])])
# Mask Brain Final Surface
# Applies brainmask.mgz to brain.mgz to create brain.finalsurfs.mgz.
mri_mask = pe.Node(ApplyMask(), name="Mask_Brain_Final_Surface")
mri_mask.inputs.mask_thresh = 5
mri_mask.inputs.out_file = 'brain.finalsurfs.mgz'
ar2_wf.connect([(normalization2, mri_mask, [('out_file', 'in_file')]),
(inputspec, mri_mask, [('brainmask', 'mask_file')])])
# WM Segmentation
"""
Attempts to separate white matter from everything else. The input is
mri/brain.mgz, and the output is mri/wm.mgz. Uses intensity, neighborhood,
and smoothness constraints. This is the volume that is edited when manually
fixing defects. Calls mri_segment, mri_edit_wm_with_aseg, and mri_pretess.
"""
wm_seg = pe.Node(SegmentWM(), name="Segment_WM")
wm_seg.inputs.out_file = 'wm.seg.mgz'