From 3604b9686d602b579cbf4bcaa797305ea8af2431 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Tue, 13 Aug 2024 15:17:44 -0400 Subject: [PATCH 1/2] Remove QSIPrep-specific parameters and Config elements (#6) --------- Co-authored-by: Matt Cieslak --- .circleci/AllFieldmaps.sh | 64 ---- .circleci/DRBUDDI_SHORELine_epi.sh | 43 --- .circleci/DRBUDDI_TENSORLine_epi.sh | 40 -- .circleci/DRBUDDI_eddy_rpe_series.sh | 43 --- .circleci/DSCSDSI.sh | 51 --- .circleci/DSCSDSI_outputs.txt | 48 --- .circleci/DSDTI_TOPUP.sh | 49 --- .circleci/DSDTI_nofmap.sh | 49 --- .circleci/DSDTI_nofmap_outputs.txt | 48 --- .circleci/DSDTI_outputs.txt | 117 ------ .circleci/DSDTI_synfmap.sh | 53 --- .circleci/DSDTI_synsdc_outputs.txt | 48 --- .circleci/HBCD_preproc.sh | 44 --- .circleci/IntramodalTemplate.sh | 49 --- .circleci/IntramodalTemplate_outputs.txt | 69 ---- .circleci/MultiT1w.sh | 87 ----- .circleci/get_data.sh | 4 +- .circleci/testing_eddy_params.json | 0 docs/reconstruction.rst | 13 +- docs/usage.rst | 10 +- qsirecon/cli/parser.py | 355 +----------------- qsirecon/cli/run.py | 133 +------ qsirecon/cli/workflow.py | 7 +- qsirecon/config.py | 106 +----- qsirecon/interfaces/interchange.py | 18 +- qsirecon/tests/data/amico_noddi_outputs.txt | 19 +- qsirecon/tests/data/autotrack_outputs.txt | 18 +- qsirecon/tests/data/dipy_dki_outputs.txt | 18 +- qsirecon/tests/data/dipy_mapmri_outputs.txt | 18 +- qsirecon/tests/data/drbuddi_rpe_outputs.txt | 23 -- .../data/drbuddi_shoreline_epi_outputs.txt | 23 -- .../data/drbuddi_tensorline_epi_outputs.txt | 23 -- qsirecon/tests/data/dscsdsi_fmap_outputs.txt | 0 qsirecon/tests/data/dscsdsi_outputs.txt | 33 -- qsirecon/tests/data/dsdti_fmap_outputs.txt | 0 qsirecon/tests/data/dsdti_nofmap_outputs.txt | 33 -- qsirecon/tests/data/dsdti_synfmap_outputs.txt | 33 -- qsirecon/tests/data/dsdti_topup_outputs.txt | 33 -- qsirecon/tests/data/eddy_config.json | 20 - qsirecon/tests/data/eddy_cuda_config.json | 21 -- qsirecon/tests/data/forrest_gump_filter.json | 8 - qsirecon/tests/data/forrest_gump_outputs.txt | 35 -- .../data/intramodal_template_outputs.txt | 69 ---- .../data/maternal_brain_project_filter.json | 5 - .../data/maternal_brain_project_outputs.txt | 36 -- qsirecon/tests/data/mrtrix3_recon_outputs.txt | 18 +- .../mrtrix_singleshell_ss3t_act_outputs.txt | 19 +- .../mrtrix_singleshell_ss3t_noact_outputs.txt | 19 +- qsirecon/tests/data/multi_t1w_outputs.txt | 0 .../data/pyafq_recon_external_trk_outputs.txt | 14 +- .../tests/data/pyafq_recon_full_outputs.txt | 18 +- qsirecon/tests/data/scalar_mapper_outputs.txt | 20 +- .../tests/data/tortoise_recon_outputs.txt | 18 +- qsirecon/tests/test_cli.py | 36 +- qsirecon/tests/utils.py | 6 +- qsirecon/utils/sentry.py | 1 - qsirecon/workflows/base.py | 68 ++-- qsirecon/workflows/recon/anatomical.py | 214 +++++------ qsirecon/workflows/recon/dipy.py | 4 +- qsirecon/workflows/recon/dsi_studio.py | 2 +- qsirecon/workflows/recon/mrtrix.py | 8 +- qsirecon/workflows/reports.py | 26 +- tests/get_data.py | 1 - tests/opts_tests.py | 24 +- wrapper/qsirecon_container/qsiprep_docker.py | 7 - .../qsirecon_container/qsiprep_singularity.py | 7 - 66 files changed, 301 insertions(+), 2245 deletions(-) delete mode 100644 .circleci/AllFieldmaps.sh delete mode 100644 .circleci/DRBUDDI_SHORELine_epi.sh delete mode 100644 .circleci/DRBUDDI_TENSORLine_epi.sh delete mode 100644 .circleci/DRBUDDI_eddy_rpe_series.sh delete mode 100644 .circleci/DSCSDSI.sh delete mode 100644 .circleci/DSCSDSI_outputs.txt delete mode 100644 .circleci/DSDTI_TOPUP.sh delete mode 100644 
.circleci/DSDTI_nofmap.sh delete mode 100644 .circleci/DSDTI_nofmap_outputs.txt delete mode 100644 .circleci/DSDTI_outputs.txt delete mode 100644 .circleci/DSDTI_synfmap.sh delete mode 100644 .circleci/DSDTI_synsdc_outputs.txt delete mode 100644 .circleci/HBCD_preproc.sh delete mode 100644 .circleci/IntramodalTemplate.sh delete mode 100644 .circleci/IntramodalTemplate_outputs.txt delete mode 100644 .circleci/MultiT1w.sh delete mode 100644 .circleci/testing_eddy_params.json delete mode 100644 qsirecon/tests/data/drbuddi_rpe_outputs.txt delete mode 100644 qsirecon/tests/data/drbuddi_shoreline_epi_outputs.txt delete mode 100644 qsirecon/tests/data/drbuddi_tensorline_epi_outputs.txt delete mode 100644 qsirecon/tests/data/dscsdsi_fmap_outputs.txt delete mode 100644 qsirecon/tests/data/dscsdsi_outputs.txt delete mode 100644 qsirecon/tests/data/dsdti_fmap_outputs.txt delete mode 100644 qsirecon/tests/data/dsdti_nofmap_outputs.txt delete mode 100644 qsirecon/tests/data/dsdti_synfmap_outputs.txt delete mode 100644 qsirecon/tests/data/dsdti_topup_outputs.txt delete mode 100644 qsirecon/tests/data/eddy_config.json delete mode 100644 qsirecon/tests/data/eddy_cuda_config.json delete mode 100644 qsirecon/tests/data/forrest_gump_filter.json delete mode 100644 qsirecon/tests/data/forrest_gump_outputs.txt delete mode 100644 qsirecon/tests/data/intramodal_template_outputs.txt delete mode 100644 qsirecon/tests/data/maternal_brain_project_filter.json delete mode 100644 qsirecon/tests/data/maternal_brain_project_outputs.txt delete mode 100644 qsirecon/tests/data/multi_t1w_outputs.txt diff --git a/.circleci/AllFieldmaps.sh b/.circleci/AllFieldmaps.sh deleted file mode 100644 index 3ba6c289..00000000 --- a/.circleci/AllFieldmaps.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -cat << DOC - -AllFieldmaps test -================= - -Instead of running full workflows, this test checks that workflows can -be built for all sorts of fieldmap configurations. 
- -This tests the following features: - - Blip-up + Blip-down DWI series for TOPUP/Eddy - - Eddy is run on a CPU - - Denoising is skipped - - A follow-up reconstruction using the dsi_studio_gqi workflow - -Inputs: -------- - - - DSDTI BIDS data (data/DSDTI_fmap) - -DOC -set +e -source ./get_data.sh -TESTDIR=${PWD} -get_config_data ${TESTDIR} -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json -export FS_LICENSE=${TESTDIR}/data/license.txt -get_bids_data ${TESTDIR} fmaps - -# Test blip-up blip-down shelled series (TOPUP/eddy) -TESTNAME=DTI_SDC -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/fmaptests/DSDTI_fmap -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --boilerplate \ - --sloppy --write-graph --mem_mb 4096 \ - -vv --output-resolution 5 - -# Test blip-up blip-down non-shelled series (SHORELine/sdcflows) -TESTNAME=DSI_SDC -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/fmaptests/DSCSDSI_fmap -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# Test blip-up blip-down shelled series (TOPUP/eddy) -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --boilerplate \ - --hmc-model 3dSHORE \ - --sloppy --write-graph --mem_mb 4096 \ - -vv --output-resolution 5 - - - - diff --git a/.circleci/DRBUDDI_SHORELine_epi.sh b/.circleci/DRBUDDI_SHORELine_epi.sh deleted file mode 100644 index 5f96db3a..00000000 --- a/.circleci/DRBUDDI_SHORELine_epi.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -cat << DOC - -Test EPI fieldmap correction with SHORELine + DRBUDDI -===================================================== - -This tests the following features: - - SHORELine (here, just b=0 registration) motion correction - - - -DOC - -set +e -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=DRBUDDI_SHORELINE_EPI -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} drbuddi_epi -CFG=${TESTDIR}/data/nipype.cfg - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/tinytensor_epi -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# Do the anatomical run on its own -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --sloppy \ - --anat-modality none \ - --denoise-method none \ - --b1-biascorrect-stage none \ - --pepolar-method DRBUDDI \ - --hmc-model none \ - --output-resolution 2 \ - --shoreline-iters 1 \ - -vv --stop-on-first-crash - - diff --git a/.circleci/DRBUDDI_TENSORLine_epi.sh b/.circleci/DRBUDDI_TENSORLine_epi.sh deleted file mode 100644 index 7d6fd48c..00000000 --- a/.circleci/DRBUDDI_TENSORLine_epi.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -cat << DOC - -Test EPI fieldmap correction with TENSORLine + DRBUDDI -====================================================== - -This tests the following features: - - TENSORLine (tensor-based) motion correction - -DOC - -set +e -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=DRBUDDI_TENSORLINE_EPI -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} DSDTI -CFG=${TESTDIR}/data/nipype.cfg - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/DSDTI -export FS_LICENSE=${TESTDIR}/data/license.txt 
-QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# Do the anatomical run on its own -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --sloppy \ - --anat-modality none \ - --denoise-method none \ - --b1-biascorrect-stage none \ - --pepolar-method DRBUDDI \ - --hmc-model tensor \ - --output-resolution 2 \ - --shoreline-iters 1 \ - -vv --stop-on-first-crash \ No newline at end of file diff --git a/.circleci/DRBUDDI_eddy_rpe_series.sh b/.circleci/DRBUDDI_eddy_rpe_series.sh deleted file mode 100644 index 7583bd6f..00000000 --- a/.circleci/DRBUDDI_eddy_rpe_series.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -cat << DOC - -Test paired DWI series with DRBUDDI -=================================== - -This tests the following features: - - Eddy is run on a CPU - - DRBUDDI is run with two DWI series - -DOC - -set +e -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=DRBUDDI_RPE -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} drbuddi_rpe_series -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/tinytensor_rpe_series -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# Do the anatomical run on its own -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --sloppy \ - --anat-modality none \ - --denoise-method none \ - --b1_biascorrect_stage none \ - --pepolar-method DRBUDDI \ - --eddy_config ${EDDY_CFG} \ - --output-resolution 5 \ - -vv --stop-on-first-crash - - diff --git a/.circleci/DSCSDSI.sh b/.circleci/DSCSDSI.sh deleted file mode 100644 index 7c192888..00000000 --- a/.circleci/DSCSDSI.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -cat << DOC - -DSCSDSI test -============ - -This tests the following features: - - Whether the --anat-only workflow is successful - - Whether the regular qsirecon workflow can resume using the - working directory from --anat-only - - The SHORELine motion correction workflow - - Skipping B1 biascorrection - - Using the SyN-SDC distortion correction method - -Inputs: -------- - - - DSCSDSI BIDS data (data/DSCSDSI_nofmap) - -DOC -set +e -# Setup environment and get data -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=DSCSDSI -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} DSCSDSI -CFG=${TESTDIR}/data/nipype.cfg - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/DSCSDSI_nofmap -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# name: Run full qsirecon on DSCSDSI -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --sloppy --write-graph --use-syn-sdc \ - --force-syn \ - --b1_biascorrect_stage none \ - --hmc_model 3dSHORE \ - --hmc-transform Rigid \ - --shoreline_iters 1 \ - --output-resolution 5 \ - --stop-on-first-crash \ - -vv - diff --git a/.circleci/DSCSDSI_outputs.txt b/.circleci/DSCSDSI_outputs.txt deleted file mode 100644 index dbc0cfca..00000000 --- a/.circleci/DSCSDSI_outputs.txt +++ /dev/null @@ -1,48 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-tester -qsirecon/sub-tester/anat -qsirecon/sub-tester/anat/sub-tester_desc-brain_mask.nii.gz -qsirecon/sub-tester/anat/sub-tester_desc-preproc_T1w.nii.gz 
-qsirecon/sub-tester/anat/sub-tester_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-tester/anat/sub-tester_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_label-CSF_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_label-GM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_label-WM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz -qsirecon/sub-tester/dwi -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_confounds.tsv -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_desc-ImageQC_dwi.csv -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_desc-SliceQC_dwi.json -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_dwiqc.json -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-3dSHORE_cnr.nii.gz -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_dwiref.nii.gz -qsirecon/sub-tester/figures -qsirecon/sub-tester/figures/sub-tester_acq-HASC55AP_carpetplot.svg -qsirecon/sub-tester/figures/sub-tester_acq-HASC55AP_coreg.svg -qsirecon/sub-tester/figures/sub-tester_acq-HASC55AP_desc-resampled_b0ref.svg -qsirecon/sub-tester/figures/sub-tester_acq-HASC55AP_desc-sdc_b0.svg -qsirecon/sub-tester/figures/sub-tester_acq-HASC55AP_dwi_denoise_acq_HASC55AP_dwi_wf_denoising.svg -qsirecon/sub-tester/figures/sub-tester_acq-HASC55AP_sampling_scheme.gif -qsirecon/sub-tester/figures/sub-tester_acq-HASC55AP_shoreline_animation.gif -qsirecon/sub-tester/figures/sub-tester_seg_brainmask.svg -qsirecon/sub-tester/figures/sub-tester_t1_2_mni.svg -qsirecon/sub-tester.html -/tmp/DSCSDSI/derivatives diff --git a/.circleci/DSDTI_TOPUP.sh b/.circleci/DSDTI_TOPUP.sh deleted file mode 100644 index 9fb4beb4..00000000 --- a/.circleci/DSDTI_TOPUP.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -cat << DOC - -DSCDTI_TOPUP test -================= - -This tests the following features: - - TOPUP on a single-shell sequence - - Eddy is run on a CPU - - mrdegibbs is run - - A follow-up reconstruction using the dsi_studio_gqi workflow - -Inputs: -------- - - - DSDTI BIDS data (data/DSDTI) - -DOC - -set +e -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=DSDTI_TOPUP -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} DSDTI -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/DSDTI -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# Do the 
anatomical run on its own -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --sloppy \ - --unringing-method mrdegibbs \ - --b1-biascorrect-stage legacy \ - --recon-spec dsi_studio_gqi \ - --eddy_config ${EDDY_CFG} \ - --output-resolution 5 \ - -vv - - diff --git a/.circleci/DSDTI_nofmap.sh b/.circleci/DSDTI_nofmap.sh deleted file mode 100644 index a334ed0b..00000000 --- a/.circleci/DSDTI_nofmap.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -cat << DOC - -DSCDTI_nofmap test -================== - -This tests the following features: - - A workflow with no distortion correction followed by eddy - - Eddy is run on a CPU - - Denoising is skipped - - A follow-up reconstruction using the dsi_studio_gqi workflow - -Inputs: -------- - - - DSDTI BIDS data (data/DSDTI) - -DOC -set +e -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=DSDTI_nofmap -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} DSDTI -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/DSDTI -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# CRITICAL: delete the fieldmap data -rm -rf data/DSDTI/sub-PNC/fmap - -# Do the anatomical run on its own -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --eddy-config ${EDDY_CFG} \ - --denoise-method none \ - --unringing-method rpg \ - --b1-biascorrect-stage none \ - --sloppy \ - --output-resolution 5 \ - -vv diff --git a/.circleci/DSDTI_nofmap_outputs.txt b/.circleci/DSDTI_nofmap_outputs.txt deleted file mode 100644 index ed902888..00000000 --- a/.circleci/DSDTI_nofmap_outputs.txt +++ /dev/null @@ -1,48 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC/anat -qsirecon/sub-PNC/anat/sub-PNC_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-PNC/anat/sub-PNC_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_label-CSF_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_label-GM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_label-WM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_confounds.tsv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-ImageQC_dwi.csv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-SliceQC_dwi.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_dwiqc.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.b 
-qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_dwiref.nii.gz -qsirecon/sub-PNC/figures -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_carpetplot.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_coreg.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_desc-resampled_b0ref.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_desc-sdc_b0.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_dwi_denoise_acq_realistic_dwi_wf_biascorr.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_dwi_denoise_acq_realistic_dwi_wf_denoising.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_sampling_scheme.gif -qsirecon/sub-PNC/figures/sub-PNC_seg_brainmask.svg -qsirecon/sub-PNC/figures/sub-PNC_t1_2_mni.svg -qsirecon/sub-PNC.html -/tmp/DSDTI/nofmap_derivatives diff --git a/.circleci/DSDTI_outputs.txt b/.circleci/DSDTI_outputs.txt deleted file mode 100644 index cfec9cc2..00000000 --- a/.circleci/DSDTI_outputs.txt +++ /dev/null @@ -1,117 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC/anat -qsirecon/sub-PNC/anat/sub-PNC_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-PNC/anat/sub-PNC_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_label-CSF_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_label-GM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_label-WM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_confounds.tsv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-ImageQC_dwi.csv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-SliceQC_dwi.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_dwiqc.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.b -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_dwiref.nii.gz -qsirecon/sub-PNC/figures -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_carpetplot.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_coreg.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_desc-resampled_b0ref.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_desc-sdc_b0.svg 
-qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_dwi_denoise_acq_realistic_dwi_wf_biascorr.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_dwi_denoise_acq_realistic_dwi_wf_denoising.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_dwi_denoise_acq_realistic_dwi_wf_unringing.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_sampling_scheme.gif -qsirecon/sub-PNC/figures/sub-PNC_seg_brainmask.svg -qsirecon/sub-PNC/figures/sub-PNC_t1_2_mni.svg -qsirecon/sub-PNC.html -qsirecon -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aal116_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aal116_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aal116_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aal116_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-ad_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aicha384_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aicha384_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aicha384_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-aicha384_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-brainnetome246_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-brainnetome246_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-brainnetome246_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-brainnetome246_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-dti_fa_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-fa0_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-fa1_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-fa2_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-gfa_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-gordon333_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-gordon333_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-gordon333_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-gordon333_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-iso_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-md_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-power264_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-power264_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-power264_mrtrixLUT.txt 
-qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-power264_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-rd_gqiscalar.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x17_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x17_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x17_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x17_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x7_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x7_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x7_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer100x7_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x17_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x17_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x17_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x17_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x7_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x7_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x7_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer200x7_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x17_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x17_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x17_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x17_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x7_atlas.mif.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x7_atlas.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x7_mrtrixLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-schaefer400x7_origLUT.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_gqi.fib.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_gqinetwork.mat -qsirecon/sub-PNC/figures -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-DSIStudioConnectivity_matrices.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-GQIODF_odfs.png -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_space-T1w_desc-preproc_space-T1w_desc-GQIODF_peaks.png -qsirecon/sub-PNC.html -/tmp/DSDTI/derivatives diff --git a/.circleci/DSDTI_synfmap.sh b/.circleci/DSDTI_synfmap.sh deleted file mode 100644 index f1fc48aa..00000000 --- 
a/.circleci/DSDTI_synfmap.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -cat << DOC - -DSCDTI_nofmap test -================== - -This tests the following features: - - A workflow with no distortion correction followed by eddy - - Eddy is run on a CPU - - Denoising is skipped - - A follow-up reconstruction using the dsi_studio_gqi workflow - -Inputs: -------- - - - DSDTI BIDS data (data/DSDTI) - -DOC -set +e - -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=DSDTI_nofmap -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} DSDTI -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/DSDTI -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# CRITICAL: delete the fieldmap data -rm -rf data/DSDTI/sub-PNC/fmap - - -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --eddy-config ${EDDY_CFG} \ - --sloppy \ - --force-syn \ - --b1-biascorrect-stage final \ - --denoise-method none \ - --output-resolution 5 \ - -vv - - - diff --git a/.circleci/DSDTI_synsdc_outputs.txt b/.circleci/DSDTI_synsdc_outputs.txt deleted file mode 100644 index a94a9fac..00000000 --- a/.circleci/DSDTI_synsdc_outputs.txt +++ /dev/null @@ -1,48 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC/anat -qsirecon/sub-PNC/anat/sub-PNC_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-PNC/anat/sub-PNC_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_label-CSF_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_label-GM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_label-WM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_confounds.tsv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-ImageQC_dwi.csv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-SliceQC_dwi.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_dwiqc.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.b -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_dwiref.nii.gz -qsirecon/sub-PNC/figures -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_carpetplot.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_coreg.svg 
-qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_desc-resampled_b0ref.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_desc-sdc_b0.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_dwi_denoise_acq_realistic_dwi_wf_biascorr.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_dwi_denoise_acq_realistic_dwi_wf_denoising.svg -qsirecon/sub-PNC/figures/sub-PNC_acq-realistic_sampling_scheme.gif -qsirecon/sub-PNC/figures/sub-PNC_seg_brainmask.svg -qsirecon/sub-PNC/figures/sub-PNC_t1_2_mni.svg -qsirecon/sub-PNC.html -/tmp/DSDTI/synfmap_derivatives diff --git a/.circleci/HBCD_preproc.sh b/.circleci/HBCD_preproc.sh deleted file mode 100644 index 6767ecde..00000000 --- a/.circleci/HBCD_preproc.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -cat << DOC - -Test paired DWI series with DRBUDDI -=================================== - -This tests the following features: - - Eddy is run on a CPU - - DRBUDDI is run with two DWI series - -DOC - -set +e -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=HBCD -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} HBCD -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/hbcd_sim -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# Do the HBCD-style run -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --sloppy \ - --anat-modality T2w \ - --infant \ - --denoise-method dwidenoise \ - --b1_biascorrect_stage none \ - --pepolar-method DRBUDDI \ - --eddy_config ${EDDY_CFG} \ - --output-resolution 5 \ - -vv --stop-on-first-crash - - diff --git a/.circleci/IntramodalTemplate.sh b/.circleci/IntramodalTemplate.sh deleted file mode 100644 index 42afef6d..00000000 --- a/.circleci/IntramodalTemplate.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -cat << DOC - -IntramodalTemplate test -======================= - -A two-session dataset is used to create an intramodal template. 
- -This tests the following features: - - Blip-up + Blip-down DWI series for TOPUP/Eddy - - Eddy is run on a CPU - - Denoising is skipped - - A follow-up reconstruction using the dsi_studio_gqi workflow - -Inputs: -------- - - - twoses BIDS data (data/DSDTI_fmap) - -DOC -set +e - -source ./get_data.sh -TESTDIR=${PWD} -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} twoses -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json -export FS_LICENSE=${TESTDIR}/data/license.txt - -# Test blip-up blip-down shelled series (TOPUP/eddy) -TESTNAME=imtemplate -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/twoses -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --sloppy \ - --b1_biascorrect_stage none \ - --hmc_model none \ - --b0-motion-corr-to first \ - --output-resolution 5 \ - --intramodal-template-transform BSplineSyN \ - --intramodal-template-iters 2 \ - -vv diff --git a/.circleci/IntramodalTemplate_outputs.txt b/.circleci/IntramodalTemplate_outputs.txt deleted file mode 100644 index a7519928..00000000 --- a/.circleci/IntramodalTemplate_outputs.txt +++ /dev/null @@ -1,69 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-tester -qsirecon/sub-tester/anat -qsirecon/sub-tester/anat/sub-tester_desc-brain_mask.nii.gz -qsirecon/sub-tester/anat/sub-tester_desc-preproc_T1w.nii.gz -qsirecon/sub-tester/anat/sub-tester_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_label-CSF_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_label-GM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_label-WM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz -qsirecon/sub-tester/figures -qsirecon/sub-tester/figures/sub-tester_imtcoreg.svg -qsirecon/sub-tester/figures/sub-tester_seg_brainmask.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_carpetplot.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_coreg.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_desc-resampled_b0ref.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_dwi_denoise_ses_1_acq_HASC55PA_dwi_wf_biascorr.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_sampling_scheme.gif -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_tointramodal.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_carpetplot.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_coreg.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_desc-resampled_b0ref.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_dwi_denoise_ses_2_acq_HASC55AP_dwi_wf_biascorr.svg 
-qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_sampling_scheme.gif -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_tointramodal.svg -qsirecon/sub-tester/figures/sub-tester_t1_2_mni.svg -qsirecon/sub-tester.html -qsirecon/sub-tester/ses-1 -qsirecon/sub-tester/ses-1/anat -qsirecon/sub-tester/ses-1/anat/sub-tester_ses-1_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-tester/ses-1/dwi -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_confounds.tsv -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_desc-ImageQC_dwi.csv -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_desc-SliceQC_dwi.json -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_dwiqc.json -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-none_cnr.nii.gz -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_dwiref.nii.gz -qsirecon/sub-tester/ses-2 -qsirecon/sub-tester/ses-2/dwi -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_confounds.tsv -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_desc-ImageQC_dwi.csv -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_desc-SliceQC_dwi.json -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_dwiqc.json -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-none_cnr.nii.gz -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_dwiref.nii.gz -/tmp/twoses/derivatives diff --git a/.circleci/MultiT1w.sh b/.circleci/MultiT1w.sh deleted file mode 100644 index ac0566b9..00000000 --- a/.circleci/MultiT1w.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -cat << DOC - -MultiT1w test -================== - -This tests the following features: - - freesurfer's robust template - -Inputs: -------- - - - DSDTI BIDS data (data/DSDTI) - -DOC -set +e - -source ./get_data.sh -TESTDIR=${PWD} -TESTNAME=MultiT1w -get_config_data ${TESTDIR} -get_bids_data ${TESTDIR} DSDTI -CFG=${TESTDIR}/data/nipype.cfg -EDDY_CFG=${TESTDIR}/data/eddy_config.json - -# For the run -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives -BIDS_INPUT_DIR=${TESTDIR}/data/DSDTI -export FS_LICENSE=${TESTDIR}/data/license.txt -QSIRECON_CMD=$(run_qsirecon_cmd ${BIDS_INPUT_DIR} ${OUTPUT_DIR}) - -# CRITICAL: delete the fieldmap data -rm -rf data/DSDTI/sub-PNC/fmap - -# Create a shifted version of the t1w -if [[ "${IN_CI}" = 'true' ]]; then - 3dWarp \ - -matvec_in2out 'MATRIX(1,0,0,2,0,1,0,4,0,0,1,1)' \ - -gridset ${BIDS_INPUT_DIR}/sub-PNC/anat/sub-PNC_T1w.nii.gz \ - -prefix 
${BIDS_INPUT_DIR}/sub-PNC/anat/sub-PNC_run-02_T1w.nii.gz \ - ${BIDS_INPUT_DIR}/sub-PNC/anat/sub-PNC_T1w.nii.gz -else - docker run -u $(id -u) \ - -v ${BIDS_INPUT_DIR}:/BIDS \ - --rm -ti --entrypoint 3dWarp \ - ${IMAGE} \ - -matvec_in2out 'MATRIX(1,0,0,2,0,1,0,4,0,0,1,1)' \ - -gridset /BIDS/sub-PNC/anat/sub-PNC_T1w.nii.gz \ - -prefix /BIDS/sub-PNC/anat/sub-PNC_run-02_T1w.nii.gz \ - /BIDS/sub-PNC/anat/sub-PNC_T1w.nii.gz - -fi - -cp ${BIDS_INPUT_DIR}/sub-PNC/anat/sub-PNC_T1w.json \ - ${BIDS_INPUT_DIR}/sub-PNC/anat/sub-PNC_run-02_T1w.json - -# Do the anatomical run on its own -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --eddy-config ${EDDY_CFG} \ - --denoise-method none \ - --sloppy \ - --output-resolution 5 \ - --anat-only \ - -vv - - -# Explicitly test --longitudinal -TESTNAME=Longitudinal -setup_dir ${TESTDIR}/${TESTNAME} -TEMPDIR=${TESTDIR}/${TESTNAME}/work -OUTPUT_DIR=${TESTDIR}/${TESTNAME}/derivatives - -${QSIRECON_CMD} \ - -w ${TEMPDIR} \ - --eddy-config ${EDDY_CFG} \ - --denoise-method none \ - --sloppy \ - --output-resolution 5 \ - --anat-only \ - --longitudinal \ - -vv - - diff --git a/.circleci/get_data.sh b/.circleci/get_data.sh index 7c648b1c..938beb13 100644 --- a/.circleci/get_data.sh +++ b/.circleci/get_data.sh @@ -309,8 +309,8 @@ Contents: ^^^^^^^^^ - data/singleshell_output/qsirecon/dataset_description.json - - data/singleshell_output/qsirecon/logs/CITATION.html - - data/singleshell_output/qsirecon/logs/CITATION.md + - data/singleshell_output/logs/CITATION.html + - data/singleshell_output/logs/CITATION.md - data/singleshell_output/qsirecon/logs/CITATION.tex - data/singleshell_output/qsirecon/sub-PNC/anat/sub-PNC_desc-brain_mask.nii.gz - data/singleshell_output/qsirecon/sub-PNC/anat/sub-PNC_desc-preproc_T1w.nii.gz diff --git a/docs/reconstruction.rst b/docs/reconstruction.rst index dd6242fd..f1a38364 100644 --- a/docs/reconstruction.rst +++ b/docs/reconstruction.rst @@ -64,8 +64,9 @@ To use a pre-packaged workflow, simply provide the name from the leftmost column ``--recon-spec`` argument. For example:: $ qsirecon-docker \ - /path/to/bids /path/for/reconstruction/outputs participant \ - --recon_input /output/from/qsirecon \ + /output/from/qsiprep \ + /path/for/reconstruction/outputs \ + participant \ --recon_spec dsi_studio_gqi \ --fs-license-file /path/to/license.txt @@ -163,8 +164,7 @@ and from ``qsirecon``:: You can run:: $ qsirecon-docker \ - derivatives/qsirecon derivatives participant \ - --recon_input derivatives/qsirecon \ + derivatives/qsiprep derivatives participant \ --recon_spec mrtrix_multishell_msmt_ACT-hsvs \ --freesurfer-input derivatives/freesurfer \ --fs-license-file /path/to/license.txt @@ -519,8 +519,9 @@ Assuming this file is called ``gqi_scalar_export.json`` and you've installed ``qsirecon-container`` you can execute this pipeline with:: $ qsirecon-docker \ - /path/to/bids /where/my/reconstructed/data/goes participant \ - --recon_input /output/from/qsirecon \ + /output/from/qsiprep \ + /where/my/reconstructed/data/goes \ + participant \ --recon_spec gqi_scalar_export.json \ --fs-license-file /path/to/license.txt diff --git a/docs/usage.rst b/docs/usage.rst index eb6ebf85..9f492653 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -7,10 +7,10 @@ The ``qsirecon`` postprocessing workflow takes as principal input the path of the preprocessing derivatives dataset that is to be processed. 
The input dataset is required to be in valid :abbr:`BIDS (Brain Imaging Data Structure)` format with at least one -diffusion MRI series. The T1w image and the DWI may be in separate BIDS - folders for a given subject. We highly recommend that you validate -your dataset with the free, online `BIDS Validator -<https://bids-standard.github.io/bids-validator/>`_. +diffusion MRI series. +The T1w image and the DWI may be in separate BIDS folders for a given subject. +We highly recommend that you validate your dataset with the free, +online `BIDS Validator <https://bids-standard.github.io/bids-validator/>`_. The exact command to run ``qsirecon`` depends on the Installation_ method. The common parts of the command are similar to the `BIDS-Apps Example: :: - qsirecon data/bids_root/ out/ participant -w work/ + qsirecon data/bids_root/ out/ participant -w work/ Command-Line Arguments diff --git a/qsirecon/cli/parser.py b/qsirecon/cli/parser.py index 8ea43a31..bfa0119f 100644 --- a/qsirecon/cli/parser.py +++ b/qsirecon/cli/parser.py @@ -24,8 +24,6 @@ # """Parser.""" -import sys - from .. import config @@ -34,52 +32,12 @@ def _build_parser(**kwargs): ``kwargs`` are passed to ``argparse.ArgumentParser`` (mainly useful for debugging). """ - from argparse import Action, ArgumentDefaultsHelpFormatter, ArgumentParser + from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from functools import partial from pathlib import Path from packaging.version import Version - deprecations = { - # parser attribute name: (replacement flag, version slated to be removed in) - "dwi_only": ("--anat-modality none", "0.23.0"), - "prefer_dedicated_fmaps": (None, "0.23.0"), - "do_reconall": (None, "0.23.0"), - "dwi_no_biascorr": ("--b1-biascorrect-stage none", "0.23.0"), - "recon_only": (None, "0.23.0"), - "b0_motion_corr_to": (None, "0.23.0"), - "b0_to_t1w_transform": ("--b0-t0-anat-transform", "0.23.0"), - } - - class DeprecatedAction(Action): - def __call__(self, parser, namespace, values, option_string=None): - new_opt, rem_vers = deprecations.get(self.dest, (None, None)) - msg = ( - f"{self.option_strings} has been deprecated and will be removed in " - f"{rem_vers or 'a later version'}." - ) - if new_opt: - msg += f" Please use `{new_opt}` instead." 
- print(msg, file=sys.stderr) - delattr(namespace, self.dest) - - class ToDict(Action): - def __call__(self, parser, namespace, values, option_string=None): - d = {} - for spec in values: - try: - name, loc = spec.split("=") - loc = Path(loc) - except ValueError: - loc = Path(spec) - name = loc.name - - if name in d: - raise ValueError(f"Received duplicate derivative name: {name}") - - d[name] = loc - setattr(namespace, self.dest, d) - def _path_exists(path, parser): """Ensure a given path exists.""" if path is None or not Path(path).exists(): @@ -177,13 +135,6 @@ def _bids_filter(value, parser): ) g_bids = parser.add_argument_group("Options for filtering BIDS queries") - g_bids.add_argument( - "--skip_bids_validation", - "--skip-bids-validation", - action="store_true", - default=False, - help="Assume the input dataset is BIDS compliant and skip the validation", - ) g_bids.add_argument( "--participant-label", "--participant_label", @@ -268,12 +219,6 @@ def _bids_filter(value, parser): ) g_subset = parser.add_argument_group("Options for performing only a subset of the workflow") - g_subset.add_argument("--anat-only", action="store_true", help="Run anatomical workflows only") - g_subset.add_argument( - "--dwi-only", - action="store_true", - help="ignore anatomical (T1w/T2w) data and process DWIs only", - ) g_subset.add_argument( "--boilerplate-only", "--boilerplate_only", @@ -297,16 +242,6 @@ def _bids_filter(value, parser): ) g_conf = parser.add_argument_group("Workflow configuration") - g_conf.add_argument( - "--ignore", - required=False, - action="store", - nargs="+", - default=[], - choices=["fieldmaps", "sbref", "t2w", "flair", "fmap-jacobian"], - help="Ignore selected aspects of the input dataset to disable corresponding " - "parts of the workflow (a space delimited list)", - ) g_conf.add_argument( "--infant", action="store_true", help="configure pipelines to process infant brains" ) @@ -315,22 +250,6 @@ def _bids_filter(value, parser): action="store_true", help="Treat dataset as longitudinal - may increase runtime", ) - g_conf.add_argument( - "--skip-anat-based-spatial-normalization", - action="store_true", - default=False, - help="skip running the anat-based normalization to template space. " - "Default is to run the normalization.", - ) - g_conf.add_argument( - "--anat-modality", - "--anat_modality", - choices=["T1w", "T2w", "none"], - default="T1w", - help="Modality to use as the anatomical reference. Images of this " - "contrast will be skull stripped and segmented for use in the " - "visual reports and reconstruction. If --infant, T2w is forced.", - ) g_conf.add_argument( "--b0-threshold", "--b0_threshold", @@ -341,132 +260,18 @@ def _bids_filter(value, parser): "a b=0 image. Current default threshold = 100; this threshold can be " "lowered or increased. Note, setting this too high can result in inaccurate results.", ) - g_conf.add_argument( - "--dwi_denoise_window", - "--dwi-denoise-window", - action="store", - default="auto", - help='window size in voxels for image-based denoising, integer or "auto".' - 'If "auto", 5 will be used for dwidenoise and auto-configured for ' - "patch2self based on the number of b>0 images.", - ) - g_conf.add_argument( - "--denoise-method", - "--denoise_method", - action="store", - choices=["dwidenoise", "patch2self", "none"], - default="dwidenoise", - help='Image-based denoising method. Either "dwidenoise" (MRtrix), ' - '"patch2self" (DIPY) or none. 
(default: dwidenoise)', - ) - g_conf.add_argument( - "--unringing-method", - "--unringing_method", - action="store", - choices=["none", "mrdegibbs", "rpg"], - help="Method for Gibbs-ringing removal.\n - none: no action\n - mrdegibbs: " - "use mrdegibbs from mrtrix3\n - rpg: Gibbs from TORTOISE, suggested for partial" - " Fourier acquisitions (default: none).", - ) - g_conf.add_argument( - "--dwi-no-biascorr", - "--dwi_no_biascorr", - action="store_true", - help="DEPRECATED: see --b1-biascorrect-stage", - ) - g_conf.add_argument( - "--b1-biascorrect-stage", - "--b1_biascorrect_stage", - action="store", - choices=["final", "none", "legacy"], - default="final", - help="Which stage to apply B1 bias correction. The default 'final' will " - "apply it after all the data has been resampled to its final space. " - "'none' will skip B1 bias correction and 'legacy' will behave consistent " - "with qsirecon < 0.17.", - ) - g_conf.add_argument( - "--no-b0-harmonization", - "--no_b0_harmonization", - action="store_true", - help="skip re-scaling dwi scans to have matching b=0 intensities", - ) - g_conf.add_argument( - "--denoise-after-combining", - "--denoise_after_combining", - action="store_true", - help="run ``dwidenoise`` after combining dwis, but before motion correction", - ) - g_conf.add_argument( - "--separate_all_dwis", - "--separate-all-dwis", - action="store_true", - help="don't attempt to combine dwis from multiple runs. Each will be " - "processed separately.", - ) - g_conf.add_argument( - "--distortion-group-merge", - "--distortion_group_merge", - action="store", - choices=["concat", "average", "none"], - default="none", - help="How to combine images across distorted groups.\n" - " - concatenate: append images in the 4th dimension\n " - " - average: if a whole sequence was duplicated in both PE\n" - " directions, average the corrected images of the same\n" - " q-space coordinate\n" - " - none: Default. Keep distorted groups separate", - ) - g_conf.add_argument( - "--anatomical-template", - required=False, - action="store", - choices=["MNI152NLin2009cAsym"], - default="MNI152NLin2009cAsym", - help="volume template space (default: MNI152NLin2009cAsym)", - ) g_conf.add_argument( "--output-resolution", "--output_resolution", action="store", # required when not recon-only (which can be specified in sysargs 2 ways) - required=not any(rcn in sys.argv for rcn in ["--recon-only", "--recon_only"]), + required=False, type=float, help="the isotropic voxel size in mm the data will be resampled to " "after preprocessing. If set to a lower value than the original voxel " "size, your data will be upsampled using BSpline interpolation.", ) - g_coreg = parser.add_argument_group("Options for dwi-to-Anatomical coregistration") - g_coreg.add_argument( - "--b0-to-t1w-transform", - "--b0_to_t1w_transform", - action="store", - default="Rigid", - choices=["Rigid", "Affine"], - help="Degrees of freedom when registering b0 to anatomical images. " - "6 degrees (rotation and translation) are used by default.", - ) - g_coreg.add_argument( - "--intramodal-template-iters", - "--intramodal_template_iters", - action="store", - default=0, - type=int, - help="Number of iterations for finding the midpoint image " - "from the b0 templates from all groups. Has no effect if there " - "is only one group. 
If 0, all b0 templates are directly registered " - "to the t1w image.", - ) - g_coreg.add_argument( - "--intramodal-template-transform", - "--intramodal_template_transform", - default="BSplineSyN", - choices=["Rigid", "Affine", "BSplineSyN", "SyN"], - action="store", - help="Transformation used for building the intramodal template.", - ) - # FreeSurfer options g_fs = parser.add_argument_group("Specific options for FreeSurfer preprocessing") g_fs.add_argument( @@ -478,105 +283,8 @@ def _bids_filter(value, parser): "at https://surfer.nmr.mgh.harvard.edu/registration.html", ) - g_moco = parser.add_argument_group("Specific options for motion correction and coregistration") - g_moco.add_argument( - "--b0-motion-corr-to", - "--bo_motion_corr_to", - action="store", - default="iterative", - choices=["iterative", "first"], - help='align to the "first" b0 volume or do an "iterative" registration' - " of all b0 images to their midpoint image (default: iterative)", - ) - g_moco.add_argument( - "--hmc-transform", - "--hmc_transform", - action="store", - default="Affine", - choices=["Affine", "Rigid"], - help="transformation to be optimized during head motion correction " "(default: affine)", - ) - g_moco.add_argument( - "--hmc_model", - "--hmc-model", - action="store", - default="eddy", - choices=["none", "3dSHORE", "eddy", "tensor"], - help='model used to generate target images for hmc. If "none" the ' - "non-b0 images will be warped using the same transform as their " - 'nearest b0 image. If "3dSHORE", SHORELine will be used. if "tensor", ' - "SHORELine iterations with a tensor model will be used", - ) - g_moco.add_argument( - "--eddy-config", - "--eddy_config", - action="store", - help="path to a json file with settings for the call to eddy. If no " - "json is specified, a default one will be used. The current default " - "json can be found here: " - "https://github.com/PennLINC/qsirecon/blob/main/qsirecon/data/eddy_params.json", - ) - g_moco.add_argument( - "--shoreline_iters", - "--shoreline-iters", - action="store", - type=int, - default=2, - help="number of SHORELine iterations. 
(default: 2)", - ) - - # Fieldmap options - g_fmap = parser.add_argument_group("Specific options for handling fieldmaps") - g_fmap.add_argument( - "--pepolar-method", - "--pepolar_method", - action="store", - default="TOPUP", - choices=["TOPUP", "DRBUDDI", "TOPUP+DRBUDDI"], - help="select which SDC method to use for PEPOLAR fieldmaps (default: TOPUP)", - ) - g_fmap.add_argument( - "--fmap-bspline", - action="store_true", - default=False, - help="Fit a B-Spline field using least-squares (experimental)", - ) - g_fmap.add_argument( - "--fmap-no-demean", - action="store_false", - default=True, - help="Do not remove median (within mask) from fieldmap", - ) - - # SyN-unwarp options - g_syn = parser.add_argument_group("Specific options for SyN distortion correction") - g_syn.add_argument( - "--use-syn-sdc", - nargs="?", - choices=["warn", "error"], - action="store", - const="error", - default=False, - help="Use fieldmap-less distortion correction based on anatomical image; " - "if unable, error (default) or warn based on optional argument.", - ) - g_syn.add_argument( - "--force-syn", - action="store_true", - default=False, - help="EXPERIMENTAL/TEMPORARY: Use SyN correction in addition to " - "fieldmap correction, if available", - ) - # arguments for reconstructing QSI data g_recon = parser.add_argument_group("Options for reconstructing qsirecon outputs") - g_recon.add_argument( - "--recon-only", - "--recon_only", - action="store_true", - default=False, - help="run only reconstruction, assumes preprocessing has already completed.", - ) g_recon.add_argument( "--recon-spec", "--recon_spec", @@ -584,20 +292,12 @@ def _bids_filter(value, parser): type=str, help="json file specifying a reconstruction pipeline to be run after preprocessing", ) - g_recon.add_argument( - "--recon-input", - "--recon_input", - action="store", - metavar="PATH", - type=Path, - help="use this directory as inputs to qsirecon. This option skips qsirecon.", - ) g_recon.add_argument( "--recon-input-pipeline", "--recon_input_pipeline", action="store", - default="qsirecon", - choices=["qsirecon", "ukb", "hcpya"], + default="qsiprep", + choices=["qsiprep", "ukb", "hcpya"], help="specify which pipeline was used to create the data specified " "as the --recon-input. Not necessary to specify if the data was " 'processed by qsirecon. Other options include "ukb" for data processed ' @@ -667,10 +367,12 @@ def _bids_filter(value, parser): "--notrack", action="store_true", default=False, - help="Opt-out of sending tracking information of this run to " - "the QSIRecon developers. This information helps to " - "improve QSIRecon and provides an indicator of real " - "world usage crucial for obtaining funding.", + help=( + "Opt-out of sending tracking information of this run to " + "the QSIRecon developers. This information helps to " + "improve QSIRecon and provides an indicator of real " + "world usage crucial for obtaining funding." 
+ ), ) g_other.add_argument( "--debug", @@ -743,29 +445,11 @@ def parse_args(args=None, namespace=None): f"total threads (--nthreads/--n_cpus={config.nipype.nprocs})" ) - if config.workflow.recon_spec and not config.execution.recon_input: - build_log.info("Running BOTH preprocessing and recon.") - config.execution.running_preproc_and_recon = True - config.execution.recon_input = config.execution.qsirecon_dir - - # Validate the tricky options here - if config.workflow.dwi_denoise_window != "auto": - try: - _ = int(config.workflow.dwi_denoise_window) - except ValueError: - raise Exception("--dwi-denoise-window must be an integer or 'auto'") - bids_dir = config.execution.bids_dir output_dir = config.execution.output_dir work_dir = config.execution.work_dir version = config.environment.version - if config.execution.qsirecon_dir is None: - config.execution.qsirecon_dir = output_dir / "qsirecon" - - if config.execution.qsirecon_dir is None: - config.execution.qsirecon_dir = output_dir / "qsirecon" - if config.execution.reportlets_dir is None: config.execution.reportlets_dir = work_dir / "reportlets" @@ -791,25 +475,8 @@ def parse_args(args=None, namespace=None): "Please modify the output path." ) - # Validate inputs - if not opts.skip_bids_validation: - if opts.recon_input is not None: - build_log.info("Skipping BIDS validation because inputs are BIDS derivatives") - else: - from ..utils.bids import validate_input_dir - - build_log.info( - "Making sure the input data is BIDS compliant (warnings can be ignored in most " - "cases)." - ) - validate_input_dir( - config.environment.exec_env, - opts.bids_dir, - opts.participant_label, - ) - # Setup directories - config.execution.log_dir = config.execution.qsirecon_dir / "logs" + config.execution.log_dir = config.execution.output_dir / "logs" # Check and create output and working directories config.execution.log_dir.mkdir(exist_ok=True, parents=True) config.execution.reportlets_dir.mkdir(exist_ok=True, parents=True) diff --git a/qsirecon/cli/run.py b/qsirecon/cli/run.py index 43af7891..5e4b87ab 100644 --- a/qsirecon/cli/run.py +++ b/qsirecon/cli/run.py @@ -184,10 +184,10 @@ def main(): ) write_derivative_description( config.execution.bids_dir, - config.execution.qsirecon_dir, + config.execution.output_dir, # dataset_links=config.execution.dataset_links, ) - write_bidsignore(config.execution.qsirecon_dir) + write_bidsignore(config.execution.output_dir) if failed_reports: print(failed_reports) @@ -198,8 +198,6 @@ def main(): # config.loggers.cli.error(msg) # if sentry_sdk is not None: # sentry_sdk.capture_message(msg, level='error') - if not config.execution.run_preproc_and_recon: - sys.exit(int(errno + failed_reports) > 0) # If preprocessing and recon are requested in the same call, start the recon workflow now. if errno > 0: @@ -207,130 +205,5 @@ def main(): config.loggers.workflow.critical( "Errors occurred during preprocessing - Recon will not run." ) - sys.exit(int(errno + failed_reports) > 0) - # POST-PREP RECON - del qsirecon_wf - # CRITICAL Call build_workflow(config_file, retval) in a subprocess. - # Because Python on Linux does not ever free virtual memory (VM), running the - # workflow construction jailed within a process preempts excessive VM buildup. 
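A minimal, self-contained sketch of the subprocess-isolation idiom described in the CRITICAL comment above (the name _build and the config path are illustrative stand-ins, not QSIRecon's API): the memory-hungry graph construction runs in a child process, its results come back through a multiprocessing.Manager dict, and the child's virtual memory is reclaimed when it exits.

from multiprocessing import Manager, Process


def _build(config_file, name, retval):
    # Stand-in for the real build_workflow: construct the (potentially huge)
    # workflow graph here, entirely inside the child process.
    retval["return_code"] = 0
    retval["workflow"] = f"{name} graph built from {config_file}"


if __name__ == "__main__":
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=_build, args=("qsirecon.toml", "QSIRecon", retval))
        p.start()
        p.join()
        retval = dict(retval.items())  # copy out before the manager shuts down
        if p.exitcode:
            retval["return_code"] = p.exitcode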
- if "pdb" not in config.execution.debug: - with Manager() as mgr: - retval = mgr.dict() - p = Process(target=build_workflow, args=(str(config_file), "QSIRecon", retval)) - p.start() - p.join() - retval = dict(retval.items()) # Convert to base dictionary - - if p.exitcode: - retval["return_code"] = p.exitcode - - else: - retval = build_workflow(str(config_file), "QSIRecon", {}) - - exitcode = retval.get("return_code", 0) - qsirecon_wf = retval.get("workflow", None) - - # CRITICAL It would be bad to let the config file be changed between prep and recon. - # config.load(config_file) - - if qsirecon_wf and config.execution.write_graph: - qsirecon_wf.write_graph(graph2use="colored", format="svg", simple_form=True) - - exitcode = exitcode or (qsirecon_wf is None) * EX_SOFTWARE - if exitcode != 0: - sys.exit(exitcode) - - # Generate boilerplate - with Manager() as mgr: - from .workflow import build_boilerplate - - p = Process(target=build_boilerplate, args=(str(config_file), qsirecon_wf)) - p.start() - p.join() - - if config.execution.boilerplate_only: - sys.exit(int(exitcode > 0)) - - # Clean up master process before running workflow, which may create forks - gc.collect() - - # Sentry tracking - if sentry_sdk is not None: - with sentry_sdk.configure_scope() as scope: - scope.set_tag("run_uuid", config.execution.run_uuid) - scope.set_tag("npart", len(config.execution.participant_label)) - sentry_sdk.add_breadcrumb(message="QSIPostRecon started", level="info") - sentry_sdk.capture_message("QSIRecon started", level="info") - - config.loggers.workflow.log( - 15, - "\n".join(["QSIRecon config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]), - ) - config.loggers.workflow.log(25, "QSIRecon started!") - errno = 1 # Default is error exit unless otherwise set - try: - qsirecon_wf.run(**config.nipype.get_plugin()) - except Exception as e: - if not config.execution.notrack: - from ..utils.sentry import process_crashfile - - crashfolders = [ - config.execution.qsirecon_dir / f"sub-{s}" / "log" / config.execution.run_uuid - for s in config.execution.participant_label - ] - for crashfolder in crashfolders: - for crashfile in crashfolder.glob("crash*.*"): - process_crashfile(crashfile) - - if sentry_sdk is not None and "Workflow did not execute cleanly" not in str(e): - sentry_sdk.capture_exception(e) - config.loggers.workflow.critical("QSIRecon failed: %s", e) - raise - else: - config.loggers.workflow.log(25, "QSIRecon finished successfully!") - if sentry_sdk is not None: - success_message = "QSIPostRecon finished without errors" - sentry_sdk.add_breadcrumb(message=success_message, level="info") - sentry_sdk.capture_message(success_message, level="info") - - # Bother users with the boilerplate only iff the workflow went okay. 
-            boiler_file = config.execution.qsirecon_dir / "logs" / "CITATION.md"
-            if boiler_file.exists():
-                if config.environment.exec_env in (
-                    "singularity",
-                    "docker",
-                    "qsirecon-docker",
-                ):
-                    boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
-                        config.execution.output_dir
-                    )
-                config.loggers.workflow.log(
-                    25,
-                    "Works derived from this QSIRecon execution should include the "
-                    f"boilerplate text found in {boiler_file}.",
-                )
-
-            errno = 0
-    finally:
-
-        from ..viz.reports import generate_reports
-
-        # Generate reports phase
-        # session_list = (
-        #     config.execution.get().get('bids_filters', {}).get('dwi', {}).get('session')
-        # )
-
-        failed_reports = generate_reports(
-            config.execution.participant_label,
-            # session_list=session_list,
-        )
-        write_derivative_description(
-            config.execution.bids_dir,
-            config.execution.qsirecon_dir,
-            # dataset_links=config.execution.dataset_links,
-        )
-        write_bidsignore(config.execution.qsirecon)
-
-        if failed_reports:
-            print(failed_reports)
+    sys.exit(int((errno + failed_reports) > 0))
diff --git a/qsirecon/cli/workflow.py b/qsirecon/cli/workflow.py
index cba54128..b37e329f 100644
--- a/qsirecon/cli/workflow.py
+++ b/qsirecon/cli/workflow.py
@@ -52,7 +52,6 @@ def build_workflow(config_file, retval):
     config.load(config_file)
     build_log = config.loggers.workflow
-    # qsirecon_dir = config.execution.qsirecon_dir
     version = config.environment.version

     retval["return_code"] = 1
@@ -69,7 +68,7 @@ def build_workflow(config_file, retval):
     build_log.log(25, f"\n{' ' * 9}".join(banner))

     # warn if older results exist: check for dataset_description.json in output folder
-    # msg = check_pipeline_version("QSIRecon", version, qsirecon_dir / "dataset_description.json")
+    # msg = check_pipeline_version("QSIRecon", version, output_dir / "dataset_description.json")
     # if msg is not None:
     #     build_log.warning(msg)

@@ -97,7 +96,7 @@ def build_workflow(config_file, retval):
         failed_reports = generate_reports(
             config.execution.participant_label,
-            config.execution.qsirecon_dir,
+            config.execution.output_dir,
             config.execution.run_uuid,
             session_list=session_list,
         )
@@ -162,7 +161,7 @@ def build_boilerplate(config_file, workflow):
     from .. import config

     config.load(config_file)
-    logs_path = config.execution.qsirecon_dir / "logs"
+    logs_path = config.execution.output_dir / "logs"
     boilerplate = workflow.visit_desc()
     citation_files = {
         ext: logs_path / ("CITATION.%s" % ext) for ext in ("bib", "tex", "md", "html")
diff --git a/qsirecon/config.py b/qsirecon/config.py
index 744b38a6..c08cd119 100644
--- a/qsirecon/config.py
+++ b/qsirecon/config.py
@@ -28,7 +28,7 @@ This module implements the memory structures to keep a consistent, singleton
 config. Settings are passed across processes via filesystem, and a copy of
 the settings for each run and subject is left under
-``<qsirecon_dir>/sub-<participant_id>/log/<run_unique_id>/qsirecon.toml``.
+``<output_dir>/sub-<participant_id>/log/<run_unique_id>/qsirecon.toml``.
 Settings are stored using :abbr:`TOML (Tom's Obvious, Minimal Language)`.
The module has a :py:func:`~qsirecon.config.to_filename` function to allow writing out the settings to hard disk in *ToML* format, which looks like: @@ -412,17 +412,10 @@ class execution(_Config): """Output verbosity.""" low_mem = None """Utilize uncompressed NIfTIs and other tricks to minimize memory allocation.""" - # md_only_boilerplate = False - # """Do not convert boilerplate from MarkDown to LaTex and HTML.""" notrack = False """Do not collect telemetry information for *QSIRecon*.""" output_dir = None """Folder where derivatives will be stored.""" - output_layout = None - """Layout of derivatives within output_dir.""" - # output_spaces = None - # """List of (non)standard spaces designated (with the ``--output-spaces`` flag of - # the command line) as spatial references for outputs.""" reports_only = False """Only build the reports, based on the reportlets found in a cached working directory.""" run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}" @@ -431,24 +424,10 @@ class execution(_Config): """Disable ODF recon reports.""" participant_label = None """List of participant identifiers that are to be preprocessed.""" - qsirecon_dir = None - """Root of QSIRecon BIDS Derivatives dataset. Depends on output_layout.""" - qsirecon_dir = None - """Root of QSIRecon BIDS Derivatives dataset.""" - recon_input = None - """Directory containing QSIRecon derivatives to run through recon workflows.""" freesurfer_input = None """Directory containing FreeSurfer directories to use for recon workflows.""" - recon_only = False - """Run only recon workflows.""" reportlets_dir = None """Path where reportlets are written.""" - run_preproc_and_recon = False - """Will both preproc and recon be run in a single call?""" - skip_anat_based_spatial_normalization = False - """Should we skip normalizing the anatomical data to a template?""" - task_id = None - """Select a particular task from all available in the dataset.""" templateflow_home = _templateflow_home """The root folder of the TemplateFlow client.""" work_dir = Path("work").absolute() @@ -471,9 +450,6 @@ class execution(_Config): "layout", "log_dir", "output_dir", - "qsirecon_dir", - "qsirecon_dir", - "recon_input", "reportlets_dir", "templateflow_home", "work_dir", @@ -567,83 +543,18 @@ def _process_value(value): class workflow(_Config): """Configure the particular execution graph of this workflow.""" - anat_modality = None - """Modality to use as the anatomical reference. Images of this - contrast will be skull stripped and segmented for use in the - visual reports and reconstruction. If --infant, T2w is forced.""" - anat_only = False - """Execute the anatomical postprocessing only.""" - anatomical_template = None - """Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences` - instance keeping standard and nonstandard spaces.""" b0_threshold = None """Any value in the .bval file less than this will be considered a b=0 image.""" - b0_motion_corr_to = None - """Perform SHORELine's initial b=0-based registration to first volume? - Or make a template? Either 'iterative' or 'first'""" - b0_to_t1w_transform = None - """Transformation model for intramodal registration.""" - b1_biascorrect_stage = None - """The stage of processing at which to apply B1 bias correction. 
Either "final" (after - resampling), "none" (skipped entirely) or "legacy" (before concatenation).""" - cifti_output = None - """Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``.""" - denoise_after_combining = False - """Run ``dwidenoise`` after combining dwis, but before motion correction.""" - denoise_method = None - """Image-based denoising method. Either "dwidenoise" (MRtrix), "patch2self" (DIPY) - or "none".""" - distortion_group_merge = None - """How to combine images across distortion groups (concatenate, average or none).""" - do_reconall = True - """Run FreeSurfer's surface reconstruction (ignored).""" - dwi_denoise_window = None - """Window size in voxels for image-based denoising, integer or "auto".""" - dwi_no_biascorr = None - """DEPRECATED: see --b1-biascorrect-stage.""" - dwi_only = False - """DEPRECATED: True if anat_modality is 'none'.""" - eddy_config = None - """Configuration for running Eddy.""" - fmap_bspline = None - """Regularize fieldmaps with a field of B-Spline basis.""" - fmap_demean = None - """Remove the mean from fieldmaps.""" - force_syn = None - """Run *fieldmap-less* susceptibility-derived distortions estimation.""" - hmc_model = None - """Model used to generate target images for hmc.""" - hmc_transform = None - """Transformation to be used in SHORELine.""" - ignore = None - """Ignore particular steps for *QSIRecon*.""" infant = False """Configure pipelines specifically for infant brains""" - intramodal_template_iters = None - """Number of iterations for intramodal template construction.""" - intramodal_template_transform = None - """Transformation used for building the intramodal template.""" longitudinal = False """Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag.""" - no_b0_harmonization = False - """Skip re-scaling dwi scans to have matching b=0 intensities.""" - output_resolution = None - """Isotropic voxel size for outputs.""" - pepolar_method = None - """SDC method to be used for PEPOLAR fieldmaps.""" recon_input_pipeline = None - """Specifies which pipeline was used to preprocess data in ``recon_input``""" + """Specifies which pipeline was used to preprocess data in ``bids_dir``.""" recon_spec = None """Recon workflow specification.""" - separate_all_dwis = False - """Process all dwis separately - do not attempt concatenation.""" - shoreline_iters = None - """How many iterations to run SHORELine.""" - unringing_method = None - """Method for Gibbs-ringing removal. Either "none", "mrdegibbs" or "rpg".""" - use_syn_sdc = None - """Run *fieldmap-less* susceptibility-derived distortions estimation - in the absence of any alternatives.""" + output_resolution = None + """Isotropic voxel size for outputs.""" class loggers: @@ -833,14 +744,5 @@ def init_spaces(checkpoint=True): if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)): spaces.add(Reference("MNI152NLin2009cAsym", {})) - # Ensure user-defined spatial references for outputs are correctly parsed. - # Certain options require normalization to a space not explicitly defined by users. - # These spaces will not be included in the final outputs. - cifti_output = workflow.cifti_output - if cifti_output: - # CIFTI grayordinates to corresponding FSL-MNI resolutions. 
- vol_res = "2" if cifti_output == "91k" else "1" - spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res})) - # Make the SpatialReferences object available workflow.spaces = spaces diff --git a/qsirecon/interfaces/interchange.py b/qsirecon/interfaces/interchange.py index 50c9c606..55609287 100644 --- a/qsirecon/interfaces/interchange.py +++ b/qsirecon/interfaces/interchange.py @@ -17,14 +17,14 @@ ] CREATEABLE_ANATOMICAL_OUTPUTS = [ "fs_5tt_hsvs", - "qsirecon_5tt_hsvs", - "qsirecon_5tt_fast", - "fs_to_qsirecon_transform_itk", - "fs_to_qsirecon_transform_mrtrix", + "qsiprep_5tt_hsvs", + "qsiprep_5tt_fast", + "fs_to_qsiprep_transform_itk", + "fs_to_qsiprep_transform_mrtrix", ] # These come directly from QSIRecon outputs. They're aligned to the DWIs in AC-PC -qsirecon_highres_anatomical_ingressed_fields = ( +qsiprep_highres_anatomical_ingressed_fields = ( QSIReconAnatomicalIngress.output_spec.class_editable_traits() ) @@ -32,13 +32,13 @@ # anatomical files (segmentations/masks/etc) that can be used downstream. # These are **independent** of the DWI data and handled separately anatomical_workflow_outputs = ( - qsirecon_highres_anatomical_ingressed_fields + qsiprep_highres_anatomical_ingressed_fields + FS_FILES_TO_REGISTER + CREATEABLE_ANATOMICAL_OUTPUTS ) # These are read directly from QSIRecon's dwi results. -qsirecon_output_names = QsiReconDWIIngress().output_spec.class_editable_traits() +qsiprep_output_names = QsiReconDWIIngress().output_spec.class_editable_traits() # dMRI + registered anatomical fields recon_workflow_anatomical_input_fields = anatomical_workflow_outputs + [ @@ -50,14 +50,14 @@ ] # Check that no conflicts have been introduced -overlapping_names = set(qsirecon_output_names).intersection(recon_workflow_anatomical_input_fields) +overlapping_names = set(qsiprep_output_names).intersection(recon_workflow_anatomical_input_fields) if overlapping_names: raise Exception( "Someone has added overlapping outputs between the anatomical " "and dwi inputs: " + " ".join(overlapping_names) ) -recon_workflow_input_fields = qsirecon_output_names + recon_workflow_anatomical_input_fields +recon_workflow_input_fields = qsiprep_output_names + recon_workflow_anatomical_input_fields default_input_set = set(recon_workflow_input_fields) default_connections = [(trait, trait) for trait in recon_workflow_input_fields] diff --git a/qsirecon/tests/data/amico_noddi_outputs.txt b/qsirecon/tests/data/amico_noddi_outputs.txt index 025f72ef..9032b10a 100644 --- a/qsirecon/tests/data/amico_noddi_outputs.txt +++ b/qsirecon/tests/data/amico_noddi_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC.html qsirecon qsirecon-NODDI qsirecon-NODDI/sub-PNC @@ -18,4 +15,8 @@ qsirecon-NODDI/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_model-no qsirecon-NODDI/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_model-noddi_mdp-isovf_dwimap.nii.gz qsirecon-NODDI/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_model-noddi_mdp-od_dwimap.nii.gz qsirecon-NODDI/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_model-noddi_mfp-direction_dwimap.nii.gz +qsirecon/dwiqc.json +qsirecon/sub-PNC qsirecon/sub-PNC +qsirecon/sub-PNC.html +sub-PNC diff --git a/qsirecon/tests/data/autotrack_outputs.txt 
b/qsirecon/tests/data/autotrack_outputs.txt index 3b6d2a4d..27283456 100644 --- a/qsirecon/tests/data/autotrack_outputs.txt +++ b/qsirecon/tests/data/autotrack_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD -qsirecon/sub-ABCD.html qsirecon qsirecon-DSIStudio qsirecon-DSIStudio/sub-ABCD @@ -17,4 +14,7 @@ qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_bun qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_bundlestats.csv qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_dwimap.fib.gz qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_mapping.map.gz +qsirecon/dwiqc.json qsirecon/sub-ABCD +qsirecon/sub-ABCD.html +sub-ABCD diff --git a/qsirecon/tests/data/dipy_dki_outputs.txt b/qsirecon/tests/data/dipy_dki_outputs.txt index 3b26e750..fd7a1eb4 100644 --- a/qsirecon/tests/data/dipy_dki_outputs.txt +++ b/qsirecon/tests/data/dipy_dki_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD -qsirecon/sub-ABCD.html qsirecon qsirecon-DKI qsirecon-DKI/sub-ABCD @@ -21,4 +18,7 @@ qsirecon-DKI/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-dki qsirecon-DKI/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-dki_mdp-RD_dwimap.nii.gz qsirecon-DKI/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-dki_mdp-RK_dwimap.nii.gz qsirecon-DKI/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-tensor_mdp-FA_dwimap.nii.gz +qsirecon/dwiqc.json qsirecon/sub-ABCD +qsirecon/sub-ABCD.html +sub-ABCD diff --git a/qsirecon/tests/data/dipy_mapmri_outputs.txt b/qsirecon/tests/data/dipy_mapmri_outputs.txt index 1dfe1ec5..916b992a 100644 --- a/qsirecon/tests/data/dipy_mapmri_outputs.txt +++ b/qsirecon/tests/data/dipy_mapmri_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD -qsirecon/sub-ABCD.html qsirecon qsirecon-DIPYMAPMRI qsirecon-DIPYMAPMRI/sub-ABCD @@ -23,4 +20,7 @@ qsirecon-mapmri_recon/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_ qsirecon-mapmri_recon/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-mapmri_mdp-RTOP_dwimap.nii.gz qsirecon-mapmri_recon/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-mapmri_mdp-RTPP_dwimap.nii.gz qsirecon-mapmri_recon/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-mapmri_mfp-lapnorm_dwimap.nii.gz +qsirecon/dwiqc.json qsirecon/sub-ABCD +qsirecon/sub-ABCD.html +sub-ABCD diff --git a/qsirecon/tests/data/drbuddi_rpe_outputs.txt b/qsirecon/tests/data/drbuddi_rpe_outputs.txt deleted file mode 100644 index ec6e0747..00000000 --- a/qsirecon/tests/data/drbuddi_rpe_outputs.txt +++ /dev/null @@ -1,23 +0,0 
@@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-tinytensors -qsirecon/sub-tinytensors.html -qsirecon/sub-tinytensors/dwi -qsirecon/sub-tinytensors/dwi/sub-tinytensors_confounds.tsv -qsirecon/sub-tinytensors/dwi/sub-tinytensors_desc-ImageQC_dwi.csv -qsirecon/sub-tinytensors/dwi/sub-tinytensors_desc-SliceQC_dwi.json -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dwiqc.json -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-tinytensors/dwi/sub-tinytensors_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/drbuddi_shoreline_epi_outputs.txt b/qsirecon/tests/data/drbuddi_shoreline_epi_outputs.txt deleted file mode 100644 index 5f60dedb..00000000 --- a/qsirecon/tests/data/drbuddi_shoreline_epi_outputs.txt +++ /dev/null @@ -1,23 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-tinytensors -qsirecon/sub-tinytensors.html -qsirecon/sub-tinytensors/dwi -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_confounds.tsv -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_desc-ImageQC_dwi.csv -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_desc-SliceQC_dwi.json -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_dwiqc.json -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_desc-none_cnr.nii.gz -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-tinytensors/dwi/sub-tinytensors_dir-PA_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/drbuddi_tensorline_epi_outputs.txt b/qsirecon/tests/data/drbuddi_tensorline_epi_outputs.txt deleted file mode 100644 index 724b993a..00000000 --- a/qsirecon/tests/data/drbuddi_tensorline_epi_outputs.txt +++ /dev/null @@ -1,23 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC.html -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_confounds.tsv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-ImageQC_dwi.csv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-SliceQC_dwi.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_dwiqc.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-brain_mask.nii.gz 
-qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.b -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-tensor_cnr.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/dscsdsi_fmap_outputs.txt b/qsirecon/tests/data/dscsdsi_fmap_outputs.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/qsirecon/tests/data/dscsdsi_outputs.txt b/qsirecon/tests/data/dscsdsi_outputs.txt deleted file mode 100644 index 560273ab..00000000 --- a/qsirecon/tests/data/dscsdsi_outputs.txt +++ /dev/null @@ -1,33 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-tester -qsirecon/sub-tester.html -qsirecon/sub-tester/anat -qsirecon/sub-tester/anat/sub-tester_desc-aseg_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_desc-brain_mask.nii.gz -qsirecon/sub-tester/anat/sub-tester_desc-preproc_T1w.nii.gz -qsirecon/sub-tester/anat/sub-tester_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_from-T1wACPC_to-T1wNative_mode-image_xfm.mat -qsirecon/sub-tester/anat/sub-tester_from-T1wNative_to-T1wACPC_mode-image_xfm.mat -qsirecon/sub-tester/anat/sub-tester_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-tester/dwi -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_confounds.tsv -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_desc-ImageQC_dwi.csv -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_desc-SliceQC_dwi.json -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_dwiqc.json -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-3dSHORE_cnr.nii.gz -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-tester/dwi/sub-tester_acq-HASC55AP_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/dsdti_fmap_outputs.txt b/qsirecon/tests/data/dsdti_fmap_outputs.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/qsirecon/tests/data/dsdti_nofmap_outputs.txt b/qsirecon/tests/data/dsdti_nofmap_outputs.txt deleted file mode 100644 index bf101d40..00000000 --- a/qsirecon/tests/data/dsdti_nofmap_outputs.txt +++ /dev/null @@ -1,33 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC.html -qsirecon/sub-PNC/anat -qsirecon/sub-PNC/anat/sub-PNC_desc-aseg_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-preproc_T1w.nii.gz 
-qsirecon/sub-PNC/anat/sub-PNC_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-T1wACPC_to-T1wNative_mode-image_xfm.mat -qsirecon/sub-PNC/anat/sub-PNC_from-T1wNative_to-T1wACPC_mode-image_xfm.mat -qsirecon/sub-PNC/anat/sub-PNC_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_confounds.tsv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-ImageQC_dwi.csv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-SliceQC_dwi.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_dwiqc.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.b -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/dsdti_synfmap_outputs.txt b/qsirecon/tests/data/dsdti_synfmap_outputs.txt deleted file mode 100644 index bf101d40..00000000 --- a/qsirecon/tests/data/dsdti_synfmap_outputs.txt +++ /dev/null @@ -1,33 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC.html -qsirecon/sub-PNC/anat -qsirecon/sub-PNC/anat/sub-PNC_desc-aseg_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-T1wACPC_to-T1wNative_mode-image_xfm.mat -qsirecon/sub-PNC/anat/sub-PNC_from-T1wNative_to-T1wACPC_mode-image_xfm.mat -qsirecon/sub-PNC/anat/sub-PNC_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_confounds.tsv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-ImageQC_dwi.csv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-SliceQC_dwi.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_dwiqc.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.b -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/dsdti_topup_outputs.txt b/qsirecon/tests/data/dsdti_topup_outputs.txt deleted file mode 100644 index bf101d40..00000000 --- a/qsirecon/tests/data/dsdti_topup_outputs.txt +++ /dev/null @@ -1,33 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs 
-qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC.html -qsirecon/sub-PNC/anat -qsirecon/sub-PNC/anat/sub-PNC_desc-aseg_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-brain_mask.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_desc-preproc_T1w.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_dseg.nii.gz -qsirecon/sub-PNC/anat/sub-PNC_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-T1wACPC_to-T1wNative_mode-image_xfm.mat -qsirecon/sub-PNC/anat/sub-PNC_from-T1wNative_to-T1wACPC_mode-image_xfm.mat -qsirecon/sub-PNC/anat/sub-PNC_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-PNC/anat/sub-PNC_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-PNC/dwi -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_confounds.tsv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-ImageQC_dwi.csv -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_desc-SliceQC_dwi.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_dwiqc.json -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.b -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/eddy_config.json b/qsirecon/tests/data/eddy_config.json deleted file mode 100644 index 5a6e9f09..00000000 --- a/qsirecon/tests/data/eddy_config.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "flm": "linear", - "slm": "none", - "fep": false, - "interp": "spline", - "nvoxhp": 100, - "fudge_factor": 10, - "dont_sep_offs_move": false, - "dont_peas": false, - "niter": 2, - "method": "jac", - "repol": true, - "num_threads": 1, - "is_shelled": true, - "use_cuda": false, - "cnr_maps": true, - "residuals": false, - "output_type": "NIFTI_GZ", - "args": "" -} \ No newline at end of file diff --git a/qsirecon/tests/data/eddy_cuda_config.json b/qsirecon/tests/data/eddy_cuda_config.json deleted file mode 100644 index 56327416..00000000 --- a/qsirecon/tests/data/eddy_cuda_config.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "flm": "quadratic", - "slm": "none", - "fep": false, - "interp": "spline", - "nvoxhp": 100, - "fudge_factor": 10, - "dont_sep_offs_move": false, - "dont_peas": false, - "niter": 2, - "method": "jac", - "repol": true, - "num_threads": 1, - "is_shelled": true, - "use_cuda": true, - "mporder": 1, - "cnr_maps": true, - "residuals": false, - "output_type": "NIFTI_GZ", - "args": "" -} \ No newline at end of file diff --git a/qsirecon/tests/data/forrest_gump_filter.json b/qsirecon/tests/data/forrest_gump_filter.json deleted file mode 100644 index 42b7af2c..00000000 --- a/qsirecon/tests/data/forrest_gump_filter.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "t1w": { - "reconstruction": "autobox" - }, - "t2w": { - "reconstruction": "autobox" - } -} diff --git a/qsirecon/tests/data/forrest_gump_outputs.txt b/qsirecon/tests/data/forrest_gump_outputs.txt deleted file mode 100644 index a1500936..00000000 --- a/qsirecon/tests/data/forrest_gump_outputs.txt +++ /dev/null @@ -1,35 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs 
-qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-01 -qsirecon/sub-01.html -qsirecon/sub-01/anat -qsirecon/sub-01/anat/sub-01_desc-aseg_dseg.nii.gz -qsirecon/sub-01/anat/sub-01_desc-brain_mask.nii.gz -qsirecon/sub-01/anat/sub-01_desc-preproc_T1w.nii.gz -qsirecon/sub-01/anat/sub-01_dseg.nii.gz -qsirecon/sub-01/anat/sub-01_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-01/anat/sub-01_from-T1wACPC_to-T1wNative_mode-image_xfm.mat -qsirecon/sub-01/anat/sub-01_from-T1wNative_to-T1wACPC_mode-image_xfm.mat -qsirecon/sub-01/anat/sub-01_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-01/ses-forrestgump -qsirecon/sub-01/ses-forrestgump/anat -qsirecon/sub-01/ses-forrestgump/anat/sub-01_ses-forrestgump_rec-autobox_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-01/ses-forrestgump/dwi -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_confounds.tsv -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_desc-ImageQC_dwi.csv -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_desc-SliceQC_dwi.json -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_dwiqc.json -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_desc-eddy_cnr.nii.gz -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_desc-preproc_dwi.b -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-01/ses-forrestgump/dwi/sub-01_ses-forrestgump_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/intramodal_template_outputs.txt b/qsirecon/tests/data/intramodal_template_outputs.txt deleted file mode 100644 index ec794e7c..00000000 --- a/qsirecon/tests/data/intramodal_template_outputs.txt +++ /dev/null @@ -1,69 +0,0 @@ -/tmp/twoses/derivatives -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-tester -qsirecon/sub-tester.html -qsirecon/sub-tester/anat -qsirecon/sub-tester/anat/sub-tester_desc-brain_mask.nii.gz -qsirecon/sub-tester/anat/sub-tester_desc-preproc_T1w.nii.gz -qsirecon/sub-tester/anat/sub-tester_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-tester/anat/sub-tester_label-CSF_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_label-GM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_label-WM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_dseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz -qsirecon/sub-tester/anat/sub-tester_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz -qsirecon/sub-tester/figures 
-qsirecon/sub-tester/figures/sub-tester_imtcoreg.svg -qsirecon/sub-tester/figures/sub-tester_seg_brainmask.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_carpetplot.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_coreg.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_desc-resampled_b0ref.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_dwi_denoise_ses_1_acq_HASC55PA_dwi_wf_biascorr.svg -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_sampling_scheme.gif -qsirecon/sub-tester/figures/sub-tester_ses-1_acq-HASC55PA_tointramodal.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_carpetplot.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_coreg.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_desc-resampled_b0ref.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_dwi_denoise_ses_2_acq_HASC55AP_dwi_wf_biascorr.svg -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_sampling_scheme.gif -qsirecon/sub-tester/figures/sub-tester_ses-2_acq-HASC55AP_tointramodal.svg -qsirecon/sub-tester/figures/sub-tester_t1_2_mni.svg -qsirecon/sub-tester/ses-1 -qsirecon/sub-tester/ses-1/anat -qsirecon/sub-tester/ses-1/anat/sub-tester_ses-1_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-tester/ses-1/dwi -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_confounds.tsv -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_desc-ImageQC_dwi.csv -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_desc-SliceQC_dwi.json -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_dwiqc.json -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-none_cnr.nii.gz -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tester/ses-1/dwi/sub-tester_ses-1_acq-HASC55PA_space-T1w_dwiref.nii.gz -qsirecon/sub-tester/ses-2 -qsirecon/sub-tester/ses-2/dwi -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_confounds.tsv -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_desc-ImageQC_dwi.csv -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_desc-SliceQC_dwi.json -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_dwiqc.json -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-none_cnr.nii.gz -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.b -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-tester/ses-2/dwi/sub-tester_ses-2_acq-HASC55AP_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/maternal_brain_project_filter.json b/qsirecon/tests/data/maternal_brain_project_filter.json deleted file mode 100644 index bb7f364e..00000000 --- a/qsirecon/tests/data/maternal_brain_project_filter.json +++ /dev/null @@ -1,5 +0,0 @@ 
-{ - "t1w": { - "reconstruction": "autobox" - } -} diff --git a/qsirecon/tests/data/maternal_brain_project_outputs.txt b/qsirecon/tests/data/maternal_brain_project_outputs.txt deleted file mode 100644 index 3d5e359f..00000000 --- a/qsirecon/tests/data/maternal_brain_project_outputs.txt +++ /dev/null @@ -1,36 +0,0 @@ -qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-01 -qsirecon/sub-01.html -qsirecon/sub-01/anat -qsirecon/sub-01/anat/sub-01_desc-aseg_dseg.nii.gz -qsirecon/sub-01/anat/sub-01_desc-brain_mask.nii.gz -qsirecon/sub-01/anat/sub-01_desc-preproc_T1w.nii.gz -qsirecon/sub-01/anat/sub-01_dseg.nii.gz -qsirecon/sub-01/anat/sub-01_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5 -qsirecon/sub-01/anat/sub-01_from-T1wACPC_to-T1wNative_mode-image_xfm.mat -qsirecon/sub-01/anat/sub-01_from-T1wNative_to-T1wACPC_mode-image_xfm.mat -qsirecon/sub-01/anat/sub-01_from-T1w_to-MNI152NLin2009cAsym_mode-image_xfm.h5 -qsirecon/sub-01/ses-01 -qsirecon/sub-01/ses-01/anat -qsirecon/sub-01/ses-01/anat/sub-01_ses-01_rec-autobox_from-orig_to-T1w_mode-image_xfm.txt -qsirecon/sub-01/ses-01/dwi -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_confounds.tsv -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_desc-ImageQC_dwi.csv -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_desc-SliceQC_dwi.json -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_dwiqc.json -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_hmcOptimization.csv -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-3dSHORE_cnr.nii.gz -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-brain_mask.nii.gz -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-preproc_dwi.b -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-preproc_dwi.bval -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-preproc_dwi.bvec -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-preproc_dwi.nii.gz -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_desc-preproc_dwi.txt -qsirecon/sub-01/ses-01/dwi/sub-01_ses-01_space-T1w_dwiref.nii.gz diff --git a/qsirecon/tests/data/mrtrix3_recon_outputs.txt b/qsirecon/tests/data/mrtrix3_recon_outputs.txt index a97f5e78..ab52c710 100644 --- a/qsirecon/tests/data/mrtrix3_recon_outputs.txt +++ b/qsirecon/tests/data/mrtrix3_recon_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD -qsirecon/sub-ABCD.html qsirecon qsirecon-MRtrix3_act-FAST qsirecon-MRtrix3_act-FAST/sub-ABCD @@ -49,4 +46,7 @@ qsirecon-anat/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_atlas-sc qsirecon-anat/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_atlas-schaefer400_dseg.mif.gz qsirecon-anat/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_atlas-schaefer400_dseg.nii.gz qsirecon-anat/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_atlas-schaefer400_dseg.txt +qsirecon/dwiqc.json qsirecon/sub-ABCD +qsirecon/sub-ABCD.html +sub-ABCD diff --git a/qsirecon/tests/data/mrtrix_singleshell_ss3t_act_outputs.txt b/qsirecon/tests/data/mrtrix_singleshell_ss3t_act_outputs.txt index 56d70350..1cf9586d 100644 --- a/qsirecon/tests/data/mrtrix_singleshell_ss3t_act_outputs.txt +++ 
b/qsirecon/tests/data/mrtrix_singleshell_ss3t_act_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC.html qsirecon qsirecon-MRtrix3_fork-SS3T_act-FAST qsirecon-MRtrix3_fork-SS3T_act-FAST/sub-PNC @@ -49,4 +46,8 @@ qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-sch qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-schaefer400_dseg.mif.gz qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-schaefer400_dseg.nii.gz qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-schaefer400_dseg.txt +qsirecon/dwiqc.json +qsirecon/sub-PNC qsirecon/sub-PNC +qsirecon/sub-PNC.html +sub-PNC diff --git a/qsirecon/tests/data/mrtrix_singleshell_ss3t_noact_outputs.txt b/qsirecon/tests/data/mrtrix_singleshell_ss3t_noact_outputs.txt index b632f6c1..878e39d2 100644 --- a/qsirecon/tests/data/mrtrix_singleshell_ss3t_noact_outputs.txt +++ b/qsirecon/tests/data/mrtrix_singleshell_ss3t_noact_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-PNC -qsirecon/sub-PNC.html qsirecon qsirecon-MRtrix3_fork-SS3T_act-None qsirecon-MRtrix3_fork-SS3T_act-None/sub-PNC @@ -49,4 +46,8 @@ qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-sch qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-schaefer400_dseg.mif.gz qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-schaefer400_dseg.nii.gz qsirecon-anat/sub-PNC/dwi/sub-PNC_acq-realistic_space-T1w_desc-preproc_atlas-schaefer400_dseg.txt +qsirecon/dwiqc.json +qsirecon/sub-PNC qsirecon/sub-PNC +qsirecon/sub-PNC.html +sub-PNC diff --git a/qsirecon/tests/data/multi_t1w_outputs.txt b/qsirecon/tests/data/multi_t1w_outputs.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/qsirecon/tests/data/pyafq_recon_external_trk_outputs.txt b/qsirecon/tests/data/pyafq_recon_external_trk_outputs.txt index 82f7b11d..74d57ded 100644 --- a/qsirecon/tests/data/pyafq_recon_external_trk_outputs.txt +++ b/qsirecon/tests/data/pyafq_recon_external_trk_outputs.txt @@ -1,12 +1,12 @@ qsirecon -qsirecon/dataset_description.json +dataset_description.json qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex +sub-ABCD qsirecon/sub-ABCD.html qsirecon qsirecon-MRtrix3 diff --git a/qsirecon/tests/data/pyafq_recon_full_outputs.txt b/qsirecon/tests/data/pyafq_recon_full_outputs.txt index f0cb7e26..54745c2b 100644 --- a/qsirecon/tests/data/pyafq_recon_full_outputs.txt +++ b/qsirecon/tests/data/pyafq_recon_full_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs 
-qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD -qsirecon/sub-ABCD.html qsirecon qsirecon-PYAFQ qsirecon-PYAFQ/sub-ABCD @@ -111,4 +108,7 @@ qsirecon-PYAFQ/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_dwimap/ qsirecon-PYAFQ/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_dwimap/viz_core_bundles/sub-ABCD_acq-10per000_coordsys-RASMM_trkmethod-probCSD_recogmethod-AFQ_desc-LeftCorticospinalviz_dwi.html qsirecon-PYAFQ/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_dwimap/viz_core_bundles/sub-ABCD_acq-10per000_coordsys-RASMM_trkmethod-probCSD_recogmethod-AFQ_desc-LeftSuperiorLongitudinalviz_dwi.html qsirecon-PYAFQ/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_dwimap/viz_core_bundles/sub-ABCD_acq-10per000_coordsys-RASMM_trkmethod-probCSD_recogmethod-AFQ_desc-RightCorticospinalviz_dwi.html +qsirecon/dwiqc.json qsirecon/sub-ABCD +qsirecon/sub-ABCD.html +sub-ABCD diff --git a/qsirecon/tests/data/scalar_mapper_outputs.txt b/qsirecon/tests/data/scalar_mapper_outputs.txt index f6d7b827..c1e51c72 100644 --- a/qsirecon/tests/data/scalar_mapper_outputs.txt +++ b/qsirecon/tests/data/scalar_mapper_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD -qsirecon/sub-ABCD.html qsirecon qsirecon-DIPYDKI qsirecon-DIPYDKI/sub-ABCD @@ -73,4 +70,7 @@ qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_mod qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-tensor_mfp-tyy_dwimap.nii.gz qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-tensor_mfp-tyz_dwimap.nii.gz qsirecon-DSIStudio/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-tensor_mfp-tzz_dwimap.nii.gz -qsirecon/sub-ABCD \ No newline at end of file +qsirecon/dwiqc.json +qsirecon/sub-ABCD +qsirecon/sub-ABCD.html +sub-ABCD diff --git a/qsirecon/tests/data/tortoise_recon_outputs.txt b/qsirecon/tests/data/tortoise_recon_outputs.txt index a83328a6..6fe4c50f 100644 --- a/qsirecon/tests/data/tortoise_recon_outputs.txt +++ b/qsirecon/tests/data/tortoise_recon_outputs.txt @@ -1,13 +1,10 @@ +dataset_description.json +logs +logs/CITATION.bib +logs/CITATION.html +logs/CITATION.md +logs/CITATION.tex qsirecon -qsirecon/dataset_description.json -qsirecon/dwiqc.json -qsirecon/logs -qsirecon/logs/CITATION.bib -qsirecon/logs/CITATION.html -qsirecon/logs/CITATION.md -qsirecon/logs/CITATION.tex -qsirecon/sub-ABCD -qsirecon/sub-ABCD.html qsirecon qsirecon-TORTOISE qsirecon-TORTOISE/sub-ABCD @@ -25,4 +22,7 @@ qsirecon-TORTOISE/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_mode qsirecon-TORTOISE/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-tensor_mdp-li_dwimap.nii.gz qsirecon-TORTOISE/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-tensor_mdp-rd_dwimap.nii.gz qsirecon-TORTOISE/sub-ABCD/dwi/sub-ABCD_acq-10per000_space-T1w_desc-preproc_model-tensor_mfp-AM_dwimap.nii.gz +qsirecon/dwiqc.json qsirecon/sub-ABCD +qsirecon/sub-ABCD.html +sub-ABCD diff --git a/qsirecon/tests/test_cli.py b/qsirecon/tests/test_cli.py index 4a2514da..5bb5572b 100644 --- a/qsirecon/tests/test_cli.py +++ b/qsirecon/tests/test_cli.py @@ -52,11 +52,8 
@@ def test_mrtrix_singleshell_ss3t_act(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=mrtrix_singleshell_ss3t_ACT-fast", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -93,11 +90,8 @@ def test_mrtrix_singleshell_ss3t_noact(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=mrtrix_singleshell_ss3t_noACT", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -134,11 +128,8 @@ def test_amico_noddi(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=amico_noddi", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -176,11 +167,8 @@ def test_autotrack(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=dsi_studio_autotrack", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -221,10 +209,7 @@ def test_dipy_mapmri(data_dir, output_dir, working_dir): "participant", f"-w={work_dir}", "--sloppy", - f"--recon-input={dataset_dir}", "--recon-spec=dipy_mapmri", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -265,10 +250,7 @@ def test_dipy_dki(data_dir, output_dir, working_dir): "participant", f"-w={work_dir}", "--sloppy", - f"--recon-input={dataset_dir}", "--recon-spec=dipy_dki", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -298,10 +280,8 @@ def test_scalar_mapper(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=test_scalar_maps", - "--recon-only", "--output-resolution=3.5", "--nthreads=1", ] @@ -336,11 +316,8 @@ def test_pyafq_recon_external_trk(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=mrtrix_multishell_msmt_pyafq_tractometry", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -373,11 +350,8 @@ def test_pyafq_recon_full(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=pyafq_tractometry", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -414,11 +388,8 @@ def test_mrtrix3_recon(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=mrtrix_multishell_msmt_ACT-fast", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -448,11 +419,8 @@ def test_tortoise_recon(data_dir, output_dir, working_dir): out_dir, "participant", f"-w={work_dir}", - f"--recon-input={dataset_dir}", "--sloppy", "--recon-spec=TORTOISE", - "--recon-only", - "--output-resolution=5", ] _run_and_generate(TEST_NAME, parameters, test_main=True) @@ -484,7 +452,7 @@ def _run_and_generate(test_name, parameters, test_main=True): retval = build_workflow(config_file, retval={}) qsirecon_wf = retval["workflow"] qsirecon_wf.run() - 
write_derivative_description(config.execution.fmri_dir, config.execution.qsirecon_dir) + write_derivative_description(config.execution.fmri_dir, config.execution.output_dir) build_boilerplate(str(config_file), qsirecon_wf) session_list = ( @@ -494,7 +462,7 @@ def _run_and_generate(test_name, parameters, test_main=True): ) generate_reports( subject_list=config.execution.participant_label, - output_dir=config.execution.qsirecon_dir, + output_dir=config.execution.output_dir, run_uuid=config.execution.run_uuid, session_list=session_list, ) diff --git a/qsirecon/tests/utils.py b/qsirecon/tests/utils.py index a6d44c1f..a333aef8 100644 --- a/qsirecon/tests/utils.py +++ b/qsirecon/tests/utils.py @@ -73,10 +73,10 @@ def get_test_data_path(): return os.path.abspath(os.path.join(os.path.dirname(__file__), "data") + os.path.sep) -def check_generated_files(qsirecon_dir, output_list_file, optional_output_list_file): +def check_generated_files(output_dir, output_list_file, optional_output_list_file): """Compare files generated by qsirecon with a list of expected files.""" - found_files = sorted(glob(os.path.join(qsirecon_dir, "**/*"), recursive=True)) - found_files = [os.path.relpath(f, qsirecon_dir) for f in found_files] + found_files = sorted(glob(os.path.join(output_dir, "**/*"), recursive=True)) + found_files = [os.path.relpath(f, output_dir) for f in found_files] # Ignore figures found_files = sorted(list(set([f for f in found_files if "figures" not in f]))) diff --git a/qsirecon/utils/sentry.py b/qsirecon/utils/sentry.py index 86f9b05d..a1d138ba 100644 --- a/qsirecon/utils/sentry.py +++ b/qsirecon/utils/sentry.py @@ -65,7 +65,6 @@ "participant_label", "bids_database_dir", "bids_filter_file", - "recon_input", "use_plugin", "fs_license_file", "work_dir", diff --git a/qsirecon/workflows/base.py b/qsirecon/workflows/base.py index 915ad6ed..8a4e0ce7 100644 --- a/qsirecon/workflows/base.py +++ b/qsirecon/workflows/base.py @@ -30,32 +30,26 @@ def init_qsirecon_wf(): - """ - This workflow organizes the execution of qsirecon, with a sub-workflow for - each subject. + """Organize the execution of qsirecon, with a sub-workflow for each subject. .. workflow:: :graph2use: orig :simple_form: yes from qsirecon.workflows.base import init_qsirecon_wf - wf = init_qsirecon_wf() - - - Parameters - + wf = init_qsirecon_wf() """ ver = Version(config.environment.version) qsirecon_wf = Workflow(name=f"qsirecon_{ver.major}_{ver.minor}_wf") qsirecon_wf.base_dir = config.execution.work_dir - if config.workflow.recon_input_pipeline not in ("qsirecon", "ukb"): + if config.workflow.recon_input_pipeline not in ("qsiprep", "ukb"): raise NotImplementedError( f"{config.workflow.recon_input_pipeline} is not supported as recon-input yet." 
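The renamed check_generated_files helper above is what consumes the expected-output manifests rewritten earlier in this patch: it globs the derivatives tree, strips the output_dir prefix, drops report figures, and compares the result against a checked-in list. A minimal sketch of that comparison, with hypothetical file paths (simplified from the real helper):

    import os
    from glob import glob

    def list_outputs(output_dir):
        # Collect paths relative to output_dir and ignore report figures,
        # matching what check_generated_files does before comparing.
        found = sorted(glob(os.path.join(output_dir, "**/*"), recursive=True))
        found = [os.path.relpath(f, output_dir) for f in found]
        return sorted({f for f in found if "figures" not in f})

    # Hypothetical usage against a manifest like tortoise_recon_outputs.txt
    with open("expected_outputs.txt") as fobj:
        expected = sorted(line.strip() for line in fobj if line.strip())
    missing = set(expected) - set(list_outputs("/out"))
    assert not missing, f"Missing expected outputs: {sorted(missing)}"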
) - if config.workflow.recon_input_pipeline == "qsirecon": + if config.workflow.recon_input_pipeline == "qsiprep": # This should work for --recon-input as long as the same dataset is in bids_dir # or if the call is doing preproc+recon to_recon_list = config.execution.participant_label @@ -72,7 +66,7 @@ def init_qsirecon_wf(): single_subject_wf = init_single_subject_recon_wf(subject_id=subject_id) single_subject_wf.config["execution"]["crashdump_dir"] = str( - config.execution.qsirecon_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid + config.execution.output_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid ) for node in single_subject_wf._get_all_nodes(): node.config = deepcopy(single_subject_wf.config) @@ -80,7 +74,7 @@ def init_qsirecon_wf(): # Dump a copy of the config file into the log directory log_dir = ( - config.execution.qsirecon_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid + config.execution.output_dir / f"sub-{subject_id}" / "log" / config.execution.run_uuid ) log_dir.mkdir(exist_ok=True, parents=True) config.to_filename(log_dir / "qsirecon.toml") @@ -89,21 +83,20 @@ def init_qsirecon_wf(): def init_single_subject_recon_wf(subject_id): - """ - This workflow organizes the reconstruction pipeline for a single subject. + """Organize the reconstruction pipeline for a single subject. + Reconstruction is performed using a separate workflow for each dwi series. Parameters - - subject_id : str - Single subject label - + ---------- + subject_id : str + Single subject label """ from ..interfaces.ingress import QsiReconDWIIngress, UKBioBankDWIIngress from ..interfaces.interchange import ( ReconWorkflowInputs, anatomical_workflow_outputs, - qsirecon_output_names, + qsiprep_output_names, recon_workflow_anatomical_input_fields, recon_workflow_input_fields, ) @@ -147,9 +140,9 @@ def init_single_subject_recon_wf(subject_id): atlas_names = spec.get("atlases", []) needs_t1w_transform = spec_needs_to_template_transform(spec) - # This is here because qsirecon currently only makes one anatomical result per subject + # This is here because qsiprep currently only makes one anatomical result per subject # regardless of sessions. So process it on its - if config.workflow.recon_input_pipeline == "qsirecon": + if config.workflow.recon_input_pipeline == "qsiprep": anat_ingress_node, available_anatomical_data = init_highres_recon_anatomical_wf( subject_id=subject_id, extras_to_make=spec.get("anatomical", []), @@ -173,7 +166,7 @@ def init_single_subject_recon_wf(subject_id): wf_name = _get_wf_name(dwi_file) # Get the preprocessed DWI and all the related preprocessed images - if config.workflow.recon_input_pipeline == "qsirecon": + if config.workflow.recon_input_pipeline == "qsiprep": dwi_ingress_nodes[dwi_file] = pe.Node( QsiReconDWIIngress(dwi_file=dwi_file), name=wf_name + "_ingressed_dwi_data" ) @@ -224,11 +217,11 @@ def init_single_subject_recon_wf(subject_id): workflow.connect([ # The dwi data (dwi_ingress_nodes[dwi_file], recon_full_inputs[dwi_file], [ - (trait, trait) for trait in qsirecon_output_names]), + (trait, trait) for trait in qsiprep_output_names]), # Session-specific anatomical data (dwi_ingress_nodes[dwi_file], dwi_individual_anatomical_wfs[dwi_file], - [(trait, "inputnode." + trait) for trait in qsirecon_output_names]), + [(trait, "inputnode." + trait) for trait in qsiprep_output_names]), # subject dwi-specific anatomical to a special node in recon_full_inputs so # we have a record of what went in. 
Otherwise it would be lost in an IdentityInterface @@ -239,7 +232,7 @@ def init_single_subject_recon_wf(subject_id): (recon_full_inputs[dwi_file], dwi_recon_wfs[dwi_file], [(trait, "inputnode." + trait) for trait in recon_workflow_input_fields]), - (anat_ingress_node if config.workflow.recon_input_pipeline == "qsirecon" + (anat_ingress_node if config.workflow.recon_input_pipeline == "qsiprep" else anat_ingress_nodes[dwi_file], dwi_individual_anatomical_wfs[dwi_file], [(f"outputnode.{trait}", f"inputnode.{trait}") @@ -308,25 +301,12 @@ def _get_iterable_dwi_inputs(subject_id): """ from ..utils.ingress import create_ukb_layout - recon_input_directory = config.execution.recon_input - if config.workflow.recon_input_pipeline == "qsirecon": - # If recon_input is specified without qsirecon, check if we can find the subject dir - if not (recon_input_directory / f"sub-{subject_id}").exists(): - config.loggers.workflow.info( - "%s not in %s, trying recon_input=%s", - subject_id, - recon_input_directory, - recon_input_directory / "qsirecon", - ) - - recon_input_directory = recon_input_directory / "qsirecon" - if not (recon_input_directory / f"sub-{subject_id}").exists(): - raise Exception( - "Unable to find subject directory in %s or %s" - % (config.execution.recon_input, recon_input_directory) - ) + dwi_dir = config.execution.bids_dir + if config.workflow.recon_input_pipeline == "qsiprep": + if not (dwi_dir / f"sub-{subject_id}").exists(): + raise Exception(f"Unable to find subject directory in {config.execution.bids_dir}") - layout = BIDSLayout(recon_input_directory, validate=False, absolute_paths=True) + layout = BIDSLayout(dwi_dir, validate=False, absolute_paths=True) # Get all the output files that are in this space dwi_files = [ f.path @@ -335,7 +315,7 @@ def _get_iterable_dwi_inputs(subject_id): ) if "space-T1w" in f.filename ] - config.loggers.workflow.info("found %s in %s", dwi_files, recon_input_directory) + config.loggers.workflow.info("found %s in %s", dwi_files, dwi_dir) return [{"bids_dwi_file": dwi_file} for dwi_file in dwi_files] if config.workflow.recon_input_pipeline == "ukb": diff --git a/qsirecon/workflows/recon/anatomical.py b/qsirecon/workflows/recon/anatomical.py index 3c439058..14abd0ea 100644 --- a/qsirecon/workflows/recon/anatomical.py +++ b/qsirecon/workflows/recon/anatomical.py @@ -30,7 +30,7 @@ from ...interfaces.interchange import ( FS_FILES_TO_REGISTER, anatomical_workflow_outputs, - qsirecon_highres_anatomical_ingressed_fields, + qsiprep_highres_anatomical_ingressed_fields, recon_workflow_input_fields, ) from ...interfaces.mrtrix import GenerateMasked5tt, ITKTransformConvert, TransformHeader @@ -82,19 +82,19 @@ def init_highres_recon_anatomical_wf( ) workflow.__desc__ = "" # "Gather" the input data. ``status`` is a dict that reflects which anatomical data - # are present. The anat_ingress_node is a nipype node that ensures that qsirecon-style - # anatomical data is available. In the case where ``pipeline_source`` is not "qsirecon", - # the data is converted in this node to be qsirecon-like. + # are present. The anat_ingress_node is a nipype node that ensures that qsiprep-style + # anatomical data is available. In the case where ``pipeline_source`` is not "qsiprep", + # the data is converted in this node to be qsiprep-like. 
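With --recon-input gone, _get_iterable_dwi_inputs above resolves preprocessed DWIs directly from config.execution.bids_dir via pybids. A minimal sketch of that query, assuming a QSIPrep-style derivatives tree (the get() arguments shown here are illustrative, not the verbatim call):

    from bids import BIDSLayout

    layout = BIDSLayout("/data/qsiprep", validate=False, absolute_paths=True)
    # Keep only DWIs already resampled into T1w space, mirroring the
    # "space-T1w" filename check in _get_iterable_dwi_inputs.
    dwi_files = [
        f.path
        for f in layout.get(suffix="dwi", extension=[".nii", ".nii.gz"])
        if "space-T1w" in f.filename
    ]
    iterable_inputs = [{"bids_dwi_file": f} for f in dwi_files]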
pipeline_source = config.workflow.recon_input_pipeline freesurfer_dir = config.execution.freesurfer_input - if pipeline_source == "qsirecon": - anat_ingress_node, status = gather_qsirecon_anatomical_data(subject_id) + if pipeline_source == "qsiprep": + anat_ingress_node, status = gather_qsiprep_anatomical_data(subject_id) elif pipeline_source == "ukb": anat_ingress_node, status = gather_ukb_anatomical_data(subject_id) else: raise Exception(f"Unknown pipeline source '{pipeline_source}'") anat_ingress_node.inputs.infant_mode = config.workflow.infant - if needs_t1w_transform and not status["has_qsirecon_t1w_transforms"]: + if needs_t1w_transform and not status["has_qsiprep_t1w_transforms"]: raise Exception("Cannot compute to-template") # If there is no high-res anat data in the inputs there may still be an image available @@ -103,7 +103,7 @@ def init_highres_recon_anatomical_wf( status["has_freesurfer"] = subject_freesurfer_path is not None # If no high-res are available, we're done here - if not status["has_qsirecon_t1w"] and subject_freesurfer_path is None: + if not status["has_qsiprep_t1w"] and subject_freesurfer_path is None: config.loggers.workflow.warning( f"No high-res anatomical data available directly in recon inputs for {subject_id}." ) @@ -118,7 +118,7 @@ def init_highres_recon_anatomical_wf( ) workflow.connect([ (anat_ingress_node, outputnode, - [(name, name) for name in qsirecon_highres_anatomical_ingressed_fields])]) # fmt:skip + [(name, name) for name in qsiprep_highres_anatomical_ingressed_fields])]) # fmt:skip # grab un-coregistered freesurfer images later use if subject_freesurfer_path is not None: @@ -155,11 +155,11 @@ def init_highres_recon_anatomical_wf( name="create_5tt_hsvs", n_procs=config.nipype.omp_nthreads, ) - ds_qsirecon_5tt_hsvs = pe.Node( + ds_qsiprep_5tt_hsvs = pe.Node( ReconDerivativesDataSink( atlas="hsvs", space="T1w", suffix="dseg", qsirecon_suffix="anat" ), - name="ds_qsirecon_5tt_hsvs", + name="ds_qsiprep_5tt_hsvs", run_without_submitting=True, ) ds_fs_5tt_hsvs = pe.Node( @@ -171,39 +171,39 @@ def init_highres_recon_anatomical_wf( ) workflow.connect([ (anat_ingress_node, ds_fs_5tt_hsvs, [("t1_preproc", "source_file")]), - (anat_ingress_node, ds_qsirecon_5tt_hsvs, [("t1_preproc", "source_file")]), + (anat_ingress_node, ds_qsiprep_5tt_hsvs, [("t1_preproc", "source_file")]), (create_5tt_hsvs, outputnode, [('out_file', 'fs_5tt_hsvs')]), (create_5tt_hsvs, ds_fs_5tt_hsvs, [("out_file", "in_file")]), ]) # fmt:skip # Transform the 5tt image so it's registered to the QSIRecon AC-PC T1w - if status["has_qsirecon_t1w"]: + if status["has_qsiprep_t1w"]: config.loggers.workflow.info( "HSVS 5tt imaged will be registered to the QSIRecon T1w image." ) - status["has_qsirecon_5tt_hsvs"] = True - register_fs_to_qsirecon_wf = init_register_fs_to_qsirecon_wf( - use_qsirecon_reference_mask=True + status["has_qsiprep_5tt_hsvs"] = True + register_fs_to_qsiprep_wf = init_register_fs_to_qsiprep_wf( + use_qsiprep_reference_mask=True ) apply_header_to_5tt = pe.Node(TransformHeader(), name="apply_header_to_5tt") workflow.connect([ - (anat_ingress_node, register_fs_to_qsirecon_wf, [ - ("t1_preproc", "inputnode.qsirecon_reference_image"), - ("t1_brain_mask", "inputnode.qsirecon_reference_mask")]), - (fs_source, register_fs_to_qsirecon_wf, [ + (anat_ingress_node, register_fs_to_qsiprep_wf, [ + ("t1_preproc", "inputnode.qsiprep_reference_image"), + ("t1_brain_mask", "inputnode.qsiprep_reference_mask")]), + (fs_source, register_fs_to_qsiprep_wf, [ (field, "inputnode." 
+ field) for field in FS_FILES_TO_REGISTER]), - (register_fs_to_qsirecon_wf, outputnode, [ - ("outputnode.fs_to_qsirecon_transform_mrtrix", - "fs_to_qsirecon_transform_mrtrix"), - ("outputnode.fs_to_qsirecon_transform_itk", - "fs_to_qsirecon_transform_itk")] + [ + (register_fs_to_qsiprep_wf, outputnode, [ + ("outputnode.fs_to_qsiprep_transform_mrtrix", + "fs_to_qsiprep_transform_mrtrix"), + ("outputnode.fs_to_qsiprep_transform_itk", + "fs_to_qsiprep_transform_itk")] + [ ("outputnode." + field, field) for field in FS_FILES_TO_REGISTER]), (create_5tt_hsvs, apply_header_to_5tt, [("out_file", "in_image")]), - (register_fs_to_qsirecon_wf, apply_header_to_5tt, [ - ("outputnode.fs_to_qsirecon_transform_mrtrix", "transform_file")]), + (register_fs_to_qsiprep_wf, apply_header_to_5tt, [ + ("outputnode.fs_to_qsiprep_transform_mrtrix", "transform_file")]), (apply_header_to_5tt, outputnode, [ - ("out_image", "qsirecon_5tt_hsvs")]), - (apply_header_to_5tt, ds_qsirecon_5tt_hsvs, [("out_image", "in_file")]), + ("out_image", "qsiprep_5tt_hsvs")]), + (apply_header_to_5tt, ds_qsiprep_5tt_hsvs, [("out_image", "in_file")]), ]) # fmt:skip workflow.__desc__ += "A hybrid surface/volume segmentation was created [Smith 2020]." @@ -221,16 +221,16 @@ def gather_ukb_anatomical_data(subject_id): """ status = { - "has_qsirecon_5tt_hsvs": False, + "has_qsiprep_5tt_hsvs": False, "has_freesurfer_5tt_hsvs": False, "has_freesurfer": False, } - recon_input_dir = config.execution.recon_input + recon_input_dir = config.execution.bids_dir # Check to see if we have a T1w preprocessed by QSIRecon missing_ukb_anats = check_ukb_anatomical_outputs(recon_input_dir) has_t1w = not missing_ukb_anats - status["has_qsirecon_t1w"] = has_t1w + status["has_qsiprep_t1w"] = has_t1w if missing_ukb_anats: config.loggers.workflow.info(f"Missing T1w from UKB session: {recon_input_dir}") else: @@ -242,15 +242,15 @@ def gather_ukb_anatomical_data(subject_id): # I couldn't figure out how to convert UKB transforms to ants. So # they're not available for recon workflows for now - status["has_qsirecon_t1w_transforms"] = False + status["has_qsiprep_t1w_transforms"] = False config.loggers.workflow.info("QSIRecon can't read FNIRT transforms from UKB at this time.") return anat_ingress, status -def gather_qsirecon_anatomical_data(subject_id): +def gather_qsiprep_anatomical_data(subject_id): """ - Gathers the anatomical data from a qsirecon input and finds which files are available. + Gathers the anatomical data from a QSIPrep input and finds which files are available. 
Parameters @@ -260,36 +260,36 @@ def gather_qsirecon_anatomical_data(subject_id): """ status = { - "has_qsirecon_5tt_hsvs": False, + "has_qsiprep_5tt_hsvs": False, "has_freesurfer_5tt_hsvs": False, "has_freesurfer": False, } - recon_input_dir = config.execution.recon_input + recon_input_dir = config.execution.bids_dir # Check to see if we have a T1w preprocessed by QSIRecon - missing_qsirecon_anats = check_qsirecon_anatomical_outputs(recon_input_dir, subject_id, "T1w") - has_qsirecon_t1w = not missing_qsirecon_anats - status["has_qsirecon_t1w"] = has_qsirecon_t1w - if missing_qsirecon_anats: + missing_qsiprep_anats = check_qsiprep_anatomical_outputs(recon_input_dir, subject_id, "T1w") + has_qsiprep_t1w = not missing_qsiprep_anats + status["has_qsiprep_t1w"] = has_qsiprep_t1w + if missing_qsiprep_anats: config.loggers.workflow.info( - "Missing T1w QSIRecon outputs found: %s", " ".join(missing_qsirecon_anats) + "Missing T1w QSIRecon outputs found: %s", " ".join(missing_qsiprep_anats) ) else: config.loggers.workflow.info("Found usable QSIRecon-preprocessed T1w image and mask.") anat_ingress = pe.Node( QSIReconAnatomicalIngress(subject_id=subject_id, recon_input_dir=recon_input_dir), - name="qsirecon_anat_ingress", + name="qsiprep_anat_ingress", ) # Check if the T1w-to-MNI transforms are in the QSIRecon outputs - missing_qsirecon_transforms = check_qsirecon_anatomical_outputs( + missing_qsiprep_transforms = check_qsiprep_anatomical_outputs( recon_input_dir, subject_id, "transforms" ) - has_qsirecon_t1w_transforms = not missing_qsirecon_transforms - status["has_qsirecon_t1w_transforms"] = has_qsirecon_t1w_transforms + has_qsiprep_t1w_transforms = not missing_qsiprep_transforms + status["has_qsiprep_t1w_transforms"] = has_qsiprep_t1w_transforms - if missing_qsirecon_transforms: + if missing_qsiprep_transforms: config.loggers.workflow.info( - "Missing T1w QSIRecon outputs: %s", " ".join(missing_qsirecon_transforms) + "Missing T1w QSIRecon outputs: %s", " ".join(missing_qsiprep_transforms) ) return anat_ingress, status @@ -321,8 +321,8 @@ def _check_zipped_unzipped(path_to_check): return exists -def check_qsirecon_anatomical_outputs(recon_input_dir, subject_id, anat_type): - """Determines whether an aligned T1w exists in a qsirecon derivatives directory. +def check_qsiprep_anatomical_outputs(recon_input_dir, subject_id, anat_type): + """Determines whether an aligned T1w exists in a qsiprep derivatives directory. 
It is possible that: - ``--dwi-only`` was used, in which case there is NO T1w available @@ -366,20 +366,20 @@ def check_ukb_anatomical_outputs(recon_input_dir): return missing -def init_register_fs_to_qsirecon_wf( - use_qsirecon_reference_mask=False, name="register_fs_to_qsirecon_wf" +def init_register_fs_to_qsiprep_wf( + use_qsiprep_reference_mask=False, name="register_fs_to_qsiprep_wf" ): """Registers a T1w images from freesurfer to another image and transforms""" inputnode = pe.Node( niu.IdentityInterface( - fields=FS_FILES_TO_REGISTER + ["qsirecon_reference_image", "qsirecon_reference_mask"] + fields=FS_FILES_TO_REGISTER + ["qsiprep_reference_image", "qsiprep_reference_mask"] ), name="inputnode", ) outputnode = pe.Node( niu.IdentityInterface( fields=FS_FILES_TO_REGISTER - + ["fs_to_qsirecon_transform_itk", "fs_to_qsirecon_transform_mrtrix"] + + ["fs_to_qsiprep_transform_itk", "fs_to_qsiprep_transform_mrtrix"] ), name="outputnode", ) @@ -393,15 +393,15 @@ def init_register_fs_to_qsirecon_wf( ) # Register the brain to the QSIRecon reference - ants_settings = pkgrf("qsirecon", "data/freesurfer_to_qsirecon.json") - register_to_qsirecon = pe.Node( - ants.Registration(from_file=ants_settings), name="register_to_qsirecon" + ants_settings = pkgrf("qsirecon", "data/freesurfer_to_qsiprep.json") + register_to_qsiprep = pe.Node( + ants.Registration(from_file=ants_settings), name="register_to_qsiprep" ) # If there is a mask for the QSIRecon reference image, use it - if use_qsirecon_reference_mask: - workflow.connect(inputnode, "qsirecon_reference_mask", - register_to_qsirecon, "fixed_image_masks") # fmt:skip + if use_qsiprep_reference_mask: + workflow.connect(inputnode, "qsiprep_reference_mask", + register_to_qsiprep, "fixed_image_masks") # fmt:skip # The more recent ANTs mat format isn't compatible with transformconvert. 
# So convert it to ANTs text format with ConvertTransform @@ -414,7 +414,7 @@ def init_register_fs_to_qsirecon_wf( ITKTransformConvert(), name="convert_ants_to_mrtrix_transform" ) - # Adjust the headers of all the input images so they're aligned to the qsirecon ref + # Adjust the headers of all the input images so they're aligned to the qsiprep ref transform_nodes = {} for image_name in FS_FILES_TO_REGISTER: transform_nodes[image_name] = pe.Node(TransformHeader(), name="transform_" + image_name) @@ -428,18 +428,18 @@ def init_register_fs_to_qsirecon_wf( workflow.connect([ (inputnode, convert_fs_brain, [ ("brain", "in_file")]), - (inputnode, register_to_qsirecon, [ - ("qsirecon_reference_image", "fixed_image")]), - (convert_fs_brain, register_to_qsirecon, [ + (inputnode, register_to_qsiprep, [ + ("qsiprep_reference_image", "fixed_image")]), + (convert_fs_brain, register_to_qsiprep, [ ("out_file", "moving_image")]), - (register_to_qsirecon, convert_ants_transform, [ + (register_to_qsiprep, convert_ants_transform, [ (("forward_transforms", _get_first), "in_transform")]), - (register_to_qsirecon, outputnode, [ - ("composite_transform", "fs_to_qsirecon_transform_itk")]), + (register_to_qsiprep, outputnode, [ + ("composite_transform", "fs_to_qsiprep_transform_itk")]), (convert_ants_transform, convert_ants_to_mrtrix_transform, [ ("out_transform", "in_transform")]), (convert_ants_to_mrtrix_transform, outputnode, - [("out_transform", "fs_to_qsirecon_transform_mrtrix")]) + [("out_transform", "fs_to_qsiprep_transform_mrtrix")]) ]) # fmt:skip return workflow @@ -447,11 +447,11 @@ def init_register_fs_to_qsirecon_wf( def init_dwi_recon_anatomical_workflow( atlas_names, - has_qsirecon_5tt_hsvs, + has_qsiprep_5tt_hsvs, needs_t1w_transform, has_freesurfer_5tt_hsvs, - has_qsirecon_t1w, - has_qsirecon_t1w_transforms, + has_qsiprep_t1w, + has_qsiprep_t1w_transforms, has_freesurfer, extras_to_make, name, @@ -469,10 +469,10 @@ def init_dwi_recon_anatomical_workflow( Parameters: =========== - has_qsirecon_5tt_hsvs: + has_qsiprep_5tt_hsvs: has_freesurfer_5tt_hsvs: True, - has_qsirecon_t1w: - has_qsirecon_t1w_transforms: True} + has_qsiprep_t1w: + has_qsiprep_t1w_transforms: True} """ # Inputnode holds data from the T1w-based anatomical workflow inputnode = pe.Node( @@ -509,10 +509,10 @@ def _exchange_fields(fields): def _get_status(): return { - "has_qsirecon_5tt_hsvs": has_qsirecon_5tt_hsvs, + "has_qsiprep_5tt_hsvs": has_qsiprep_5tt_hsvs, "has_freesurfer_5tt_hsvs": has_freesurfer_5tt_hsvs, - "has_qsirecon_t1w": has_qsirecon_t1w, - "has_qsirecon_t1w_transforms": has_qsirecon_t1w_transforms, + "has_qsiprep_t1w": has_qsiprep_t1w, + "has_qsiprep_t1w_transforms": has_qsiprep_t1w_transforms, } reference_grid_wf = init_output_grid_wf() @@ -525,7 +525,7 @@ def _get_status(): ]) # fmt:skip # Missing Freesurfer AND QSIRecon T1ws, or the user wants a DWI-based mask - if not (has_qsirecon_t1w or has_freesurfer) or prefer_dwi_mask: + if not (has_qsiprep_t1w or has_freesurfer) or prefer_dwi_mask: desc += ( "No T1w weighted images were available for masking, so a mask " "was estimated based on the b=0 images in the DWI data itself." @@ -544,8 +544,8 @@ def _get_status(): # No data from QSIRecon was available, BUT we have freesurfer! register it and # get the brain, masks and possibly a to-MNI transform. 
- # --> If has_freesurfer AND has qsirecon_t1w, the necessary files were created earlier - elif has_freesurfer and not has_qsirecon_t1w: + # --> If has_freesurfer AND has qsiprep_t1w, the necessary files were created earlier + elif has_freesurfer and not has_qsiprep_t1w: fs_source = pe.Node( nio.FreeSurferSource(subjects_dir=config.execution.fs_subjects_dir), name="fs_source" ) @@ -562,70 +562,70 @@ def _get_status(): + [ "t1_brain_mask", "t1_preproc", - "fs_to_qsirecon_transform_mrtrix", - "fs_to_qsirecon_transform_itk", + "fs_to_qsiprep_transform_mrtrix", + "fs_to_qsiprep_transform_itk", ] ) # Perform the registration and connect the outputs to buffernode # NOTE: using FreeSurfer "brain" image as t1_preproc and aseg as the brainmask - has_qsirecon_t1w = True - register_fs_to_qsirecon_wf = init_register_fs_to_qsirecon_wf( - use_qsirecon_reference_mask=False + has_qsiprep_t1w = True + register_fs_to_qsiprep_wf = init_register_fs_to_qsiprep_wf( + use_qsiprep_reference_mask=False ) workflow.connect([ (inputnode, fs_source, [("subject_id", "subject_id")]), - (inputnode, register_fs_to_qsirecon_wf, [ - ("dwi_ref", "inputnode.qsirecon_reference_image")]), - (fs_source, register_fs_to_qsirecon_wf, [ + (inputnode, register_fs_to_qsiprep_wf, [ + ("dwi_ref", "inputnode.qsiprep_reference_image")]), + (fs_source, register_fs_to_qsiprep_wf, [ (field, "inputnode." + field) for field in FS_FILES_TO_REGISTER]), - (register_fs_to_qsirecon_wf, buffernode, [ + (register_fs_to_qsiprep_wf, buffernode, [ ("outputnode.brain", "t1_preproc"), ("outputnode.aseg", "t1_brain_mask"), - ("outputnode.fs_to_qsirecon_transform_mrtrix", - "fs_to_qsirecon_transform_mrtrix"), - ("outputnode.fs_to_qsirecon_transform_itk", - "fs_to_qsirecon_transform_itk")] + [ + ("outputnode.fs_to_qsiprep_transform_mrtrix", + "fs_to_qsiprep_transform_mrtrix"), + ("outputnode.fs_to_qsiprep_transform_itk", + "fs_to_qsiprep_transform_itk")] + [ ("outputnode." + field, field) for field in FS_FILES_TO_REGISTER]), ]) # fmt:skip # Do we need to transform the 5tt hsvs from fsnative? - if "mrtrix_5tt_hsvs" in extras_to_make and not has_qsirecon_5tt_hsvs: + if "mrtrix_5tt_hsvs" in extras_to_make and not has_qsiprep_5tt_hsvs: # Transform the 5tt image so it's registered to the QSIRecon AC-PC T1w config.loggers.workflow.info( "HSVS 5tt imaged will be registered to the " "QSIRecon dwiref image." ) - _exchange_fields(["qsirecon_5tt_hsvs"]) + _exchange_fields(["qsiprep_5tt_hsvs"]) if not has_freesurfer_5tt_hsvs: raise Exception("The 5tt image in fsnative should have been created by now") apply_header_to_5tt_hsvs = pe.Node(TransformHeader(), name="apply_header_to_5tt_hsvs") - ds_qsirecon_5tt_hsvs = pe.Node( + ds_qsiprep_5tt_hsvs = pe.Node( ReconDerivativesDataSink( atlas="hsvs", suffix="dseg", qsirecon_suffix="anat", ), - name="ds_qsirecon_5tt_hsvs", + name="ds_qsiprep_5tt_hsvs", run_without_submitting=True, ) workflow.connect([ (inputnode, apply_header_to_5tt_hsvs, [("fs_5tt_hsvs", "in_image")]), (apply_header_to_5tt_hsvs, buffernode, [ - ("out_image", "qsirecon_5tt_hsvs")]), - (apply_header_to_5tt_hsvs, ds_qsirecon_5tt_hsvs, [("out_image", "in_file")]), + ("out_image", "qsiprep_5tt_hsvs")]), + (apply_header_to_5tt_hsvs, ds_qsiprep_5tt_hsvs, [("out_image", "in_file")]), ]) # fmt:skip desc += "A hybrid surface/volume segmentation was created [Smith 2020]." 
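All of the branches above are gated by the boolean status dict assembled in the gather_* helpers, which is why most of this hunk is a mechanical has_qsirecon_* to has_qsiprep_* key rename. A condensed sketch of the masking decision tree those flags drive (a hypothetical helper, not the verbatim workflow code):

    def choose_mask_strategy(status, prefer_dwi_mask=False):
        # Mirrors the branch order in init_dwi_recon_anatomical_workflow.
        if not (status["has_qsiprep_t1w"] or status["has_freesurfer"]) or prefer_dwi_mask:
            return "estimate a mask from the b=0 images"
        if status["has_freesurfer"] and not status["has_qsiprep_t1w"]:
            return "register FreeSurfer outputs, then use the aseg-derived mask"
        return "resample the preprocessed T1w mask into the DWI grid"

    status = {"has_qsiprep_t1w": True, "has_freesurfer": True}
    print(choose_mask_strategy(status))  # resamples the preprocessed T1w mask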
# If we have transforms to the template space, use them to get ROIs/atlases - # if not has_qsirecon_t1w_transforms and has_qsirecon_t1w: + # if not has_qsiprep_t1w_transforms and has_qsiprep_t1w: # desc += "In order to warp brain parcellations from template space into " \ # "alignment with the DWI data, the DWI-aligned FreeSurfer brain was " \ # "registered to template space. " - # # We now have qsirecon t1w and transforms!! - # has_qsirecon_t1w = has_qsirecon_t1w_transforms = True + # # We now have qsiprep t1w and transforms!! + # has_qsiprep_t1w = has_qsiprep_t1w_transforms = True # # Calculate the transforms here: - # has_qsirecon_t1w_transforms = True + # has_qsiprep_t1w_transforms = True # _exchange_fields(['t1_2_mni_forward_transform', 't1_2_mni_reverse_transform']) # t1_2_mni = pe.Node( # get_t1w_registration_node( @@ -642,7 +642,7 @@ def _get_status(): # Check the status of the T1wACPC-to-template transforms if needs_t1w_transform: - if has_qsirecon_t1w_transforms: + if has_qsiprep_t1w_transforms: config.loggers.workflow.info("Found T1w-to-template transforms from QSIRecon") desc += ( "T1w-based spatial normalization calculated during " @@ -657,7 +657,7 @@ def _get_status(): # Simply resample the T1w mask into the DWI resolution. This was the default # up to version 0.14.3 - if has_qsirecon_t1w and not prefer_dwi_mask: + if has_qsiprep_t1w and not prefer_dwi_mask: desc += "Brainmasks from {} were used in all " "subsequent reconstruction steps.".format( skull_strip_method ) @@ -676,7 +676,7 @@ def _get_status(): (resample_mask, buffernode, [("output_image", "dwi_mask")]) ]) # fmt:skip - if has_qsirecon_t1w_transforms: + if has_qsiprep_t1w_transforms: config.loggers.workflow.info("Transforming ODF ROIs into DWI space for visual report.") # Resample ROI targets to DWI resolution for ODF plotting crossing_rois_file = pkgrf("qsirecon", "data/crossing_rois.nii.gz") @@ -782,7 +782,7 @@ def _get_status(): workflow.connect( inputnode, 'dwi_file', workflow.get_node(node), 'source_file') # fmt:skip - if "mrtrix_5tt_hsv" in extras_to_make and not has_qsirecon_5tt_hsvs: + if "mrtrix_5tt_hsv" in extras_to_make and not has_qsiprep_5tt_hsvs: raise Exception("Unable to create a 5tt HSV image given input data.") # Directly connect anything from the inputs that we haven't created here diff --git a/qsirecon/workflows/recon/dipy.py b/qsirecon/workflows/recon/dipy.py index 65a3fa36..5abd3022 100644 --- a/qsirecon/workflows/recon/dipy.py +++ b/qsirecon/workflows/recon/dipy.py @@ -196,7 +196,7 @@ def init_dipy_brainsuite_shore_recon_wf( ]) # fmt:skip # Plot targeted regions - if available_anatomical_data["has_qsirecon_t1w_transforms"] and plot_reports: + if available_anatomical_data["has_qsiprep_t1w_transforms"] and plot_reports: ds_report_odfs = pe.Node( ReconDerivativesDataSink(extension=".png", desc="3dSHOREODF", suffix="odfs"), name="ds_report_odfs", @@ -481,7 +481,7 @@ def init_dipy_mapmri_recon_wf( ]) # fmt:skip # Plot targeted regions - if available_anatomical_data["has_qsirecon_t1w_transforms"] and plot_reports: + if available_anatomical_data["has_qsiprep_t1w_transforms"] and plot_reports: ds_report_odfs = pe.Node( ReconDerivativesDataSink(extension=".png", desc="MAPLMRIODF", suffix="odfs"), name="ds_report_odfs", diff --git a/qsirecon/workflows/recon/dsi_studio.py b/qsirecon/workflows/recon/dsi_studio.py index 7590565f..ec54a790 100644 --- a/qsirecon/workflows/recon/dsi_studio.py +++ b/qsirecon/workflows/recon/dsi_studio.py @@ -116,7 +116,7 @@ def init_dsi_studio_recon_wf( (plot_peaks, 
ds_report_peaks, [('peak_report', 'in_file')]) ]) # fmt:skip # Plot targeted regions - if available_anatomical_data["has_qsirecon_t1w_transforms"]: + if available_anatomical_data["has_qsiprep_t1w_transforms"]: ds_report_odfs = pe.Node( ReconDerivativesDataSink(extension=".png", desc="GQIODF", suffix="odfs"), name="ds_report_odfs", diff --git a/qsirecon/workflows/recon/mrtrix.py b/qsirecon/workflows/recon/mrtrix.py index 7afd1f2e..8b6497a1 100644 --- a/qsirecon/workflows/recon/mrtrix.py +++ b/qsirecon/workflows/recon/mrtrix.py @@ -58,7 +58,7 @@ def init_mrtrix_csd_recon_wf( *Default qsirecon inputs* - qsirecon_5tt_hsvs + qsiprep_5tt_hsvs A hybrid surface volume segmentation 5tt image aligned with the QSIRecon T1w @@ -170,7 +170,7 @@ def init_mrtrix_csd_recon_wf( if method_5tt == "hsvs": workflow.connect([ (inputnode, estimate_response, [ - ('qsirecon_5tt_hsvs', 'mtt_file')]) + ('qsiprep_5tt_hsvs', 'mtt_file')]) ]) # fmt:skip else: raise Exception("Unrecognized 5tt method: " + method_5tt) @@ -249,7 +249,7 @@ def init_mrtrix_csd_recon_wf( ]) # fmt:skip # Plot targeted regions - if available_anatomical_data["has_qsirecon_t1w_transforms"]: + if available_anatomical_data["has_qsiprep_t1w_transforms"]: ds_report_odfs = pe.Node( ReconDerivativesDataSink(extension=".png", desc="wmFOD", suffix="odfs"), name="ds_report_odfs", @@ -559,7 +559,7 @@ def init_mrtrix_tractography_wf( if use_5tt: if method_5tt == "hsvs": - connect_5tt = "qsirecon_5tt_hsvs" + connect_5tt = "qsiprep_5tt_hsvs" else: raise Exception("Unrecognized 5tt method: " + method_5tt) workflow.connect(inputnode, connect_5tt, diff --git a/qsirecon/workflows/reports.py b/qsirecon/workflows/reports.py index 49bff640..2708a61c 100644 --- a/qsirecon/workflows/reports.py +++ b/qsirecon/workflows/reports.py @@ -19,7 +19,7 @@ from .. import config from ..interfaces import DerivativesDataSink from ..interfaces.ingress import QsiReconDWIIngress -from ..interfaces.interchange import qsirecon_output_names, recon_workflow_input_fields +from ..interfaces.interchange import qsiprep_output_names, recon_workflow_input_fields from ..interfaces.reports import InteractiveReport from ..utils.bids import collect_data @@ -27,8 +27,7 @@ def init_json_preproc_report_wf(subject_list): - """ - This workflow creates a json report for the dmriprep-viewer. + """Create a json report for the dmriprep-viewer. .. 
workflow:: :graph2use: orig @@ -42,16 +41,15 @@ def init_json_preproc_report_wf(subject_list): output_dir='.') - Parameters: - - subject_list : list - List of subject labels - work_dir : str - Directory in which to store workflow execution state and temporary - files - output_dir : str - Directory in which to save derivatives - + Parameters + ---------- + subject_list : list + List of subject labels + work_dir : str + Directory in which to store workflow execution state and temporary + files + output_dir : str + Directory in which to save derivatives """ work_dir = config.execution.work_dir output_dir = config.execution.output_dir @@ -138,7 +136,7 @@ def init_single_subject_json_report_wf(subject_id, name): workflow.connect([ (scans_iter, qsirecon_preprocessed_dwi_data, ([('dwi_file', 'dwi_file')])), (qsirecon_preprocessed_dwi_data, inputnode, [ - (trait, trait) for trait in qsirecon_output_names]), + (trait, trait) for trait in qsiprep_output_names]), (inputnode, interactive_report, [ ('dwi_file', 'processed_dwi_file'), ('confounds_file', 'confounds_file'), diff --git a/tests/get_data.py b/tests/get_data.py index b42b15f8..8dfab30d 100644 --- a/tests/get_data.py +++ b/tests/get_data.py @@ -80,7 +80,6 @@ def get_default_cli_args(): use_syn_sdc=False, force_syn=False, verbose_count=2, - recon_input=None, recon_spec=None, use_plugin=None, nthreads=1, diff --git a/tests/opts_tests.py b/tests/opts_tests.py index 0dab1bdf..c46e549d 100644 --- a/tests/opts_tests.py +++ b/tests/opts_tests.py @@ -1,6 +1,6 @@ import pytest import os -from qsirecon.cli.run import set_freesurfer_license, validate_bids, get_parser +from qsirecon.cli.run import set_freesurfer_license, get_parser base_args = "bids out participant --output_resolution 2.3" @@ -24,15 +24,6 @@ def test_required(): assert pa_fail.value.code == 2 -def test_required_recononly(monkeypatch): - # dont need output_resolution if we have recon_only - base_args = "bids out participant --recon_only" - args = base_args.split(' ') - # sys.argv used to set if output-res required - monkeypatch.setattr('qsirecon.cli.run.sys.argv', args) - get_parser().parse_args(args) - - def test_set_freesurfer_license(tmpdir): """test setting, precedence, and error if DNE""" # create temp file @@ -65,16 +56,3 @@ def test_set_freesurfer_license(tmpdir): opts = get_parser().parse_args(fsarg.split(' ')) set_freesurfer_license(opts) assert os.getenv('FS_LICENSE') == f'{lic3}' - - -@pytest.mark.parametrize("will_validate,opts_str", ( - (True, base_args), # run if base args - (False, base_args + " --skip_bids_validation"), # not if skipped - (False, base_args + " --recon-only"), # or recon all - (False, base_args + " --skip_bids_validation --recon-only") # or both -)) -def test_validate_bids(monkeypatch, opts_str, will_validate): - # from ..utils.bids import validate_input_dir - monkeypatch.setattr("qsirecon.utils.bids.validate_input_dir", lambda *kargs: True) - opts = get_parser().parse_args(opts_str.split(' ')) - assert will_validate == validate_bids(opts) diff --git a/wrapper/qsirecon_container/qsiprep_docker.py b/wrapper/qsirecon_container/qsiprep_docker.py index 1e6b92d6..7062a222 100644 --- a/wrapper/qsirecon_container/qsiprep_docker.py +++ b/wrapper/qsirecon_container/qsiprep_docker.py @@ -233,10 +233,6 @@ def get_parser(): "analysis_level", nargs="?", choices=["participant"], default="participant" ) - # For qsirecon - parser.add_argument( - "--recon-input", "--recon_input", required=False, action="store", type=os.path.abspath - ) # For eddy parser.add_argument( 
"--eddy-config", "--eddy_config", required=False, action="store", type=os.path.abspath @@ -430,9 +426,6 @@ def main(): main_args = ["/data", "/out", opts.analysis_level] if opts.bids_dir: command.extend(["-v", ":".join((opts.bids_dir, "/data", "ro"))]) - if opts.recon_input: - command.extend(["-v", ":".join((opts.recon_input, "/qsirecon-output", "ro"))]) - main_args.extend(["--recon-input", "/qsirecon-output"]) if opts.freesurfer_input: command.extend(["-v", ":".join((opts.freesurfer_input, "/sngl/freesurfer-input", "ro"))]) main_args.extend(["--freesurfer-input", "/sngl/freesurfer-input"]) diff --git a/wrapper/qsirecon_container/qsiprep_singularity.py b/wrapper/qsirecon_container/qsiprep_singularity.py index 536fe2dd..6bd8d3e1 100644 --- a/wrapper/qsirecon_container/qsiprep_singularity.py +++ b/wrapper/qsirecon_container/qsiprep_singularity.py @@ -216,10 +216,6 @@ def get_parser(): "analysis_level", nargs="?", choices=["participant"], default="participant" ) - # For qsirecon - parser.add_argument( - "--recon-input", "--recon_input", required=False, action="store", type=os.path.abspath - ) parser.add_argument( "--bids-filter-file", "--bids_filter_file", @@ -411,9 +407,6 @@ def main(): main_args.extend(["--fs-license-file", mounted_license]) if opts.bids_dir: command.extend(["-B", ":".join((opts.bids_dir, "/sngl/data"))]) - if opts.recon_input: - command.extend(["-B", ":".join((opts.recon_input, "/sngl/qsirecon-output"))]) - main_args.extend(["--recon-input", "/sngl/qsirecon-output"]) if opts.recon_spec: if os.path.exists(opts.recon_spec): spec_dir, spec_fname = op.split(opts.recon_spec) From 667c5dee841d0311286e2dce62937d9df3949064 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Tue, 13 Aug 2024 20:21:20 -0400 Subject: [PATCH 2/2] Drop unused modules, classes, and functions (#11) * Remove dwi, anatomical, and fieldmap workflows. * Remove lots of unused interfaces. * Update __init__.py * Remove more. * Do more. * Update __init__.py * Add init_output_grid_wf back in. 
* Remove QSIPrep-specific parameters and Config elements (#6) --------- Co-authored-by: Matt Cieslak --------- Co-authored-by: Matt Cieslak --- qsirecon/interfaces/__init__.py | 21 +- qsirecon/interfaces/anatomical.py | 235 +-- qsirecon/interfaces/ants.py | 144 -- qsirecon/interfaces/bids.py | 148 +- qsirecon/interfaces/confounds.py | 260 --- qsirecon/interfaces/connectivity.py | 73 - qsirecon/interfaces/dipy.py | 122 -- qsirecon/interfaces/dsi_studio.py | 141 -- qsirecon/interfaces/dwi_merge.py | 907 ---------- qsirecon/interfaces/eddy.py | 466 ----- qsirecon/interfaces/epi_fmap.py | 435 ----- qsirecon/interfaces/fmap.py | 1523 ----------------- qsirecon/interfaces/freesurfer.py | 593 ------- qsirecon/interfaces/gradients.py | 812 --------- qsirecon/interfaces/images.py | 478 ------ qsirecon/interfaces/itk.py | 276 --- qsirecon/interfaces/mrtrix.py | 307 ---- qsirecon/interfaces/nilearn.py | 481 ------ qsirecon/interfaces/niworkflows.py | 631 ------- qsirecon/interfaces/patch2self.py | 372 ---- qsirecon/interfaces/qc.py | 34 - qsirecon/interfaces/reports.py | 629 ------- qsirecon/interfaces/shoreline.py | 521 ------ qsirecon/interfaces/surf.py | 155 -- qsirecon/interfaces/tortoise.py | 489 ------ qsirecon/interfaces/utils.py | 196 --- qsirecon/utils/__init__.py | 1 - qsirecon/utils/bids.py | 91 - qsirecon/utils/bspline.py | 52 - qsirecon/utils/grouping.py | 1220 ------------- qsirecon/utils/ingress.py | 53 - qsirecon/utils/misc.py | 57 - qsirecon/utils/testing.py | 205 --- qsirecon/viz/utils.py | 51 - qsirecon/workflows/anatomical/__init__.py | 1 - qsirecon/workflows/anatomical/surface.py | 791 --------- qsirecon/workflows/anatomical/volume.py | 1199 ------------- qsirecon/workflows/dwi/__init__.py | 11 - qsirecon/workflows/dwi/base.py | 493 ------ qsirecon/workflows/dwi/confounds.py | 112 -- qsirecon/workflows/dwi/derivatives.py | 195 --- .../workflows/dwi/distortion_group_merge.py | 321 ---- qsirecon/workflows/dwi/finalize.py | 666 ------- qsirecon/workflows/dwi/fsl.py | 448 ----- qsirecon/workflows/dwi/hmc.py | 767 --------- qsirecon/workflows/dwi/hmc_sdc.py | 292 ---- qsirecon/workflows/dwi/intramodal_template.py | 439 ----- qsirecon/workflows/dwi/merge.py | 732 -------- qsirecon/workflows/dwi/pre_hmc.py | 271 --- qsirecon/workflows/dwi/qc.py | 185 -- qsirecon/workflows/dwi/registration.py | 261 --- qsirecon/workflows/dwi/resampling.py | 321 ---- qsirecon/workflows/dwi/util.py | 264 --- qsirecon/workflows/fieldmap/__init__.py | 50 - qsirecon/workflows/fieldmap/base.py | 282 --- qsirecon/workflows/fieldmap/drbuddi.py | 233 --- qsirecon/workflows/fieldmap/fmap.py | 121 -- qsirecon/workflows/fieldmap/pepolar.py | 333 ---- qsirecon/workflows/fieldmap/phdiff.py | 172 -- qsirecon/workflows/fieldmap/syn.py | 230 --- qsirecon/workflows/fieldmap/unwarp.py | 342 ---- qsirecon/workflows/fieldmap/utils.py | 126 -- qsirecon/workflows/recon/anatomical.py | 52 +- tests/grouping_tests.py | 56 - 64 files changed, 77 insertions(+), 21868 deletions(-) delete mode 100644 qsirecon/interfaces/confounds.py delete mode 100644 qsirecon/interfaces/connectivity.py delete mode 100644 qsirecon/interfaces/dwi_merge.py delete mode 100644 qsirecon/interfaces/eddy.py delete mode 100644 qsirecon/interfaces/epi_fmap.py delete mode 100644 qsirecon/interfaces/fmap.py delete mode 100644 qsirecon/interfaces/itk.py delete mode 100644 qsirecon/interfaces/nilearn.py delete mode 100644 qsirecon/interfaces/niworkflows.py delete mode 100644 qsirecon/interfaces/patch2self.py delete mode 100644 qsirecon/interfaces/shoreline.py 
delete mode 100644 qsirecon/interfaces/surf.py delete mode 100644 qsirecon/utils/bspline.py delete mode 100644 qsirecon/utils/grouping.py delete mode 100644 qsirecon/workflows/anatomical/__init__.py delete mode 100644 qsirecon/workflows/anatomical/surface.py delete mode 100644 qsirecon/workflows/anatomical/volume.py delete mode 100644 qsirecon/workflows/dwi/__init__.py delete mode 100644 qsirecon/workflows/dwi/base.py delete mode 100644 qsirecon/workflows/dwi/confounds.py delete mode 100644 qsirecon/workflows/dwi/derivatives.py delete mode 100644 qsirecon/workflows/dwi/distortion_group_merge.py delete mode 100644 qsirecon/workflows/dwi/finalize.py delete mode 100644 qsirecon/workflows/dwi/fsl.py delete mode 100644 qsirecon/workflows/dwi/hmc.py delete mode 100644 qsirecon/workflows/dwi/hmc_sdc.py delete mode 100644 qsirecon/workflows/dwi/intramodal_template.py delete mode 100644 qsirecon/workflows/dwi/merge.py delete mode 100644 qsirecon/workflows/dwi/pre_hmc.py delete mode 100644 qsirecon/workflows/dwi/qc.py delete mode 100644 qsirecon/workflows/dwi/registration.py delete mode 100644 qsirecon/workflows/dwi/resampling.py delete mode 100644 qsirecon/workflows/dwi/util.py delete mode 100644 qsirecon/workflows/fieldmap/__init__.py delete mode 100644 qsirecon/workflows/fieldmap/base.py delete mode 100644 qsirecon/workflows/fieldmap/drbuddi.py delete mode 100644 qsirecon/workflows/fieldmap/fmap.py delete mode 100644 qsirecon/workflows/fieldmap/pepolar.py delete mode 100644 qsirecon/workflows/fieldmap/phdiff.py delete mode 100644 qsirecon/workflows/fieldmap/syn.py delete mode 100644 qsirecon/workflows/fieldmap/unwarp.py delete mode 100644 qsirecon/workflows/fieldmap/utils.py delete mode 100644 tests/grouping_tests.py diff --git a/qsirecon/interfaces/__init__.py b/qsirecon/interfaces/__init__.py index 687b3719..50a466bb 100644 --- a/qsirecon/interfaces/__init__.py +++ b/qsirecon/interfaces/__init__.py @@ -1,22 +1,5 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from .bids import ( - BIDSDataGrabber, - BIDSInfo, - DerivativesDataSink, - DerivativesMaybeDataSink, -) -from .confounds import DMRISummary, GatherConfounds -from .fmap import FieldToHz, FieldToRadS, Phasediff2Fieldmap, Phases2Fieldmap -from .freesurfer import ( - FSDetectInputs, - FSInjectBrainExtracted, - MakeMidthickness, - RefineBrainMask, - StructuralReference, -) -from .images import Conform, ConformDwi, IntraModalMerge, ValidateImage -from .reports import AboutSummary, SubjectSummary -from .surf import NormalizeSurf -from .utils import AddTSVHeader, ConcatAffines +from .bids import DerivativesDataSink +from .images import ConformDwi, ValidateImage diff --git a/qsirecon/interfaces/anatomical.py b/qsirecon/interfaces/anatomical.py index c34ac0cb..3e9aad5c 100644 --- a/qsirecon/interfaces/anatomical.py +++ b/qsirecon/interfaces/anatomical.py @@ -14,9 +14,7 @@ from pathlib import Path import nibabel as nb -import nilearn.image as nim import numpy as np -from dipy.segment.threshold import otsu from nipype import logging from nipype.interfaces.base import ( BaseInterfaceInputSpec, @@ -27,16 +25,12 @@ traits, ) from nipype.utils.filemanip import fname_presuffix -from pkg_resources import resource_filename as pkgr from pkg_resources import resource_filename as pkgrf -from scipy import ndimage -from scipy.spatial import distance from ..utils.ingress import ukb_dirname_to_bids from .images import to_lps LOGGER = logging.getLogger("nipype.interface") 
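After this commit the package-level namespace in qsirecon/interfaces/__init__.py shrinks to the three names kept above, so anything else must be imported from its defining module. For example (module paths as in this patch; the call sites are illustrative):

    # Still re-exported at the package level:
    from qsirecon.interfaces import ConformDwi, DerivativesDataSink, ValidateImage

    # Classes that survive the purge but lose their re-export are imported
    # from their home modules instead, e.g. the relocated VoxelSizeChooser:
    from qsirecon.interfaces.anatomical import VoxelSizeChooser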
-KNOWN_TEMPLATES = ["MNI152NLin2009cAsym", "infant"] class QSIReconAnatomicalIngressInputSpec(BaseInterfaceInputSpec): @@ -198,94 +192,6 @@ def _run_interface(self, runtime): return runtime -class _DiceOverlapInputSpec(BaseInterfaceInputSpec): - anatomical_mask = File(exists=True, mandatory=True, desc="Mask from a T1w image") - dwi_mask = File(exists=True, mandatory=True, desc="Mask from a DWI image") - - -class _DiceOverlapOutputSpec(TraitedSpec): - dice_score = traits.Float() - - -class DiceOverlap(SimpleInterface): - input_spec = _DiceOverlapInputSpec - output_spec = _DiceOverlapOutputSpec - - def _run_interface(self, runtime): - t1_img = nb.load(self.inputs.anatomical_mask) - dwi_img = nb.load(self.inputs.dwi_mask) - - if not t1_img.shape == dwi_img.shape: - raise Exception("Cannot compare masks with different shapes") - - self._results["dice_score"] = distance.dice( - t1_img.get_fdata().flatten(), dwi_img.get_fdata().flatten() - ) - return runtime - - -class _VoxelSizeChooserInputSpec(BaseInterfaceInputSpec): - voxel_size = traits.Float() - input_image = File(exists=True) - anisotropic_strategy = traits.Enum("min", "max", "mean", usedefault=True) - - -class _VoxelSizeChooserOutputSpec(TraitedSpec): - voxel_size = traits.Float() - - -class VoxelSizeChooser(SimpleInterface): - input_spec = _VoxelSizeChooserInputSpec - output_spec = _VoxelSizeChooserOutputSpec - - def _run_interface(self, runtime): - if not isdefined(self.inputs.input_image) and not isdefined(self.inputs.voxel_size): - raise Exception("Either voxel_size or input_image need to be defined") - - # A voxel size was specified without an image - if isdefined(self.inputs.voxel_size): - voxel_size = self.inputs.voxel_size - else: - # An image was provided - img = nb.load(self.inputs.input_image) - zooms = img.header.get_zooms()[:3] - if self.inputs.anisotropic_strategy == "min": - voxel_size = min(zooms) - elif self.inputs.anisotropic_strategy == "max": - voxel_size = max(zooms) - else: - voxel_size = np.round(np.mean(zooms), 2) - - self._results["voxel_size"] = voxel_size - return runtime - - -class _FakeSegmentationInputSpec(BaseInterfaceInputSpec): - mask_file = File(exists=True, mandatory=True) - - -class _FakeSegmentationOutputSpec(TraitedSpec): - dseg_file = File(exists=True) - - -class FakeSegmentation(SimpleInterface): - input_spec = _FakeSegmentationInputSpec - output_spec = _FakeSegmentationOutputSpec - - def _run_interface(self, runtime): - img = nb.load(self.inputs.mask_file) - orig_mask = img.get_fdata() > 0 - eroded1 = ndimage.binary_erosion(orig_mask, iterations=3) - eroded2 = ndimage.binary_erosion(eroded1, iterations=3) - final = orig_mask.astype(int) + eroded1 + eroded2 - out_img = nb.Nifti1Image(final, img.affine, header=img.header) - out_fname = fname_presuffix(self.inputs.mask_file, suffix="_dseg", newpath=runtime.cwd) - out_img.to_filename(out_fname) - self._results["dseg_file"] = out_fname - - return runtime - - """ The spherical harmonic coefficients are stored as follows. 
First, since the @@ -404,130 +310,37 @@ def calculate_order(order): return runtime -class _DesaturateSkullInputSpec(BaseInterfaceInputSpec): - skulled_t2w_image = File(exists=True, mandatory=True, desc="Skull-on T2w image") - brain_mask_image = File( - exists=True, mandatory=True, desc="Binary brain mask in the same grid as skulled_t2w_image" - ) - brain_to_skull_ratio = traits.CFloat( - 8.0, usedefault=True, desc="Ratio of signal in the brain to signal in the skull" - ) +class _VoxelSizeChooserInputSpec(BaseInterfaceInputSpec): + voxel_size = traits.Float() + input_image = File(exists=True) + anisotropic_strategy = traits.Enum("min", "max", "mean", usedefault=True) -class _DesaturateSkullOutputSpec(TraitedSpec): - desaturated_t2w = File(exists=True) - head_scaling_factor = traits.Float(0.0) +class _VoxelSizeChooserOutputSpec(TraitedSpec): + voxel_size = traits.Float() -class DesaturateSkull(SimpleInterface): - input_spec = _DesaturateSkullInputSpec - output_spec = _DesaturateSkullOutputSpec +class VoxelSizeChooser(SimpleInterface): + input_spec = _VoxelSizeChooserInputSpec + output_spec = _VoxelSizeChooserOutputSpec def _run_interface(self, runtime): + if not isdefined(self.inputs.input_image) and not isdefined(self.inputs.voxel_size): + raise Exception("Either voxel_size or input_image need to be defined") - out_file = fname_presuffix( - self.inputs.skulled_t2w_image, - newpath=runtime.cwd, - suffix="_desaturated.nii", - use_ext=False, - ) - skulled_img = nim.load_img(self.inputs.skulled_t2w_image) - brainmask_img = nim.load_img(self.inputs.brain_mask_image) - brain_median, nonbrain_head_median = calculate_nonbrain_saturation( - skulled_img, brainmask_img - ) - - actual_brain_to_skull_ratio = brain_median / nonbrain_head_median - LOGGER.info("found brain to skull ratio: %.3f", actual_brain_to_skull_ratio) - desat_data = skulled_img.get_fdata(dtype="float32").copy() - adjustment = 1.0 - if actual_brain_to_skull_ratio < self.inputs.brain_to_skull_ratio: - # We need to downweight the non-brain voxels - adjustment = actual_brain_to_skull_ratio / self.inputs.brain_to_skull_ratio - LOGGER.info("Desaturating outside-brain signal by %.5f" % adjustment) - nonbrain_mask = brainmask_img.get_fdata() < 1 - # Apply the adjustment - desat_data[nonbrain_mask] = desat_data[nonbrain_mask] * adjustment - - desat_img = nim.new_img_like(skulled_img, desat_data, copy_header=True) - desat_img.header.set_data_dtype("float32") - desat_img.to_filename(out_file) - self._results["desaturated_t2w"] = out_file - self._results["head_scaling_factor"] = adjustment - return runtime - - -def calculate_nonbrain_saturation(head_img, brain_mask_img): - # Calculate the - head_data = head_img.get_fdata() - brain_mask = brain_mask_img.get_fdata() > 0 - - def clip_values(values): - _, top_percent = np.percentile(values, np.array([0, 99.75]), axis=None) - return np.clip(values, 0, top_percent) - - nonbrain_voxels = head_data[np.logical_not(brain_mask)] - winsorized_nonbrain_voxels = clip_values(nonbrain_voxels) - threshold = otsu(winsorized_nonbrain_voxels) * 0.5 - - nbmask = np.zeros_like(head_img.get_fdata()) - nbmask[head_data > threshold] = 2 - nbmask[brain_mask] = 0 - - in_brain_median = np.median(head_data[brain_mask]) - non_brain_head_median = np.median(head_data[nbmask > 0]) - - return in_brain_median, non_brain_head_median - - -class _GetTemplateInputSpec(BaseInterfaceInputSpec): - template_name = traits.Str("MNI152NLin2009cAsym", usedefault=True, mandatory=True) - t1_file = File(exists=True) - t2_file = 
File(exists=True) - mask_file = File(exists=True) - infant_mode = traits.Bool(False, usedefault=True) - anatomical_contrast = traits.Enum("T1w", "T2w", "none") - - -class _GetTemplateOutputSpec(BaseInterfaceInputSpec): - template_name = traits.Str() - template_file = File(exists=True) - template_brain_file = File(exists=True) - template_mask_file = File(exists=True) - - -class GetTemplate(SimpleInterface): - input_spec = _GetTemplateInputSpec - output_spec = _GetTemplateOutputSpec - - def _run_interface(self, runtime): - self._results["template_name"] = self.inputs.template_name - contrast_name = self.inputs.anatomical_contrast.lower() - if contrast_name == "none": - LOGGER.info("Using T1w modality template for ACPC alignment") - contrast_name = "t1w" - - # Cover the cases where the template images are actually in the - # qsirecon package. This is for common use cases (MNI2009cAsym and Infant) - # and legacy - if self.inputs.template_name in KNOWN_TEMPLATES or self.inputs.infant_mode: - if not self.inputs.infant_mode: - ref_img = pkgr("qsirecon", "data/mni_1mm_%s_lps.nii.gz" % contrast_name) - ref_img_brain = pkgr( - "qsirecon", - "data/mni_1mm_%s_lps_brain.nii.gz" % contrast_name, - ) - ref_img_mask = pkgr("qsirecon", "data/mni_1mm_t1w_lps_brainmask.nii.gz") - else: - ref_img = pkgr("qsirecon", "data/mni_1mm_%s_lps_infant.nii.gz" % contrast_name) - ref_img_brain = pkgr( - "qsirecon", "data/mni_1mm_%s_lps_brain_infant.nii.gz" % contrast_name - ) - ref_img_mask = pkgr("qsirecon", "data/mni_1mm_t1w_lps_brainmask_infant.nii.gz") - self._results["template_file"] = ref_img - self._results["template_brain_file"] = ref_img_brain - self._results["template_mask_file"] = ref_img_mask + # A voxel size was specified without an image + if isdefined(self.inputs.voxel_size): + voxel_size = self.inputs.voxel_size else: - raise NotImplementedError("Arbitrary templates not available yet") + # An image was provided + img = nb.load(self.inputs.input_image) + zooms = img.header.get_zooms()[:3] + if self.inputs.anisotropic_strategy == "min": + voxel_size = min(zooms) + elif self.inputs.anisotropic_strategy == "max": + voxel_size = max(zooms) + else: + voxel_size = np.round(np.mean(zooms), 2) + self._results["voxel_size"] = voxel_size return runtime diff --git a/qsirecon/interfaces/ants.py b/qsirecon/interfaces/ants.py index 7069570d..8777835b 100644 --- a/qsirecon/interfaces/ants.py +++ b/qsirecon/interfaces/ants.py @@ -1,17 +1,12 @@ #!python import logging -import os import os.path as op -import nibabel as nb from nipype.interfaces.base import ( BaseInterfaceInputSpec, CommandLine, CommandLineInputSpec, File, - InputMultiObject, - OutputMultiObject, - SimpleInterface, TraitedSpec, isdefined, traits, @@ -21,122 +16,6 @@ LOGGER = logging.getLogger("nipype.interface") -# Step 1 from DSI Studio, importing DICOM files or nifti -class MultivariateTemplateConstruction2InputSpec(CommandLineInputSpec): - dimension = traits.Enum(2, 3, 4, default=3, usedefault=True, argstr="-d %d") - input_file = File(desc="txt or csv file with images", exists=True, position=-1) - input_images = InputMultiObject( - traits.Either(File(exists=True), InputMultiObject(File(exists=True))), - desc="list of images or lists of images", - xor=("input_file",), - argstr="%s", - position=-1, - copyfile=False, - ) - image_statistic = traits.Enum( - 0, - 1, - 2, - default=1, - usedefault=True, - desc="statistic used to summarize " - "images. 
0=mean, 1= mean of normalized intensities, 2=median", - ) - iteration_limit = traits.Int( - 4, usedefault=True, argstr="-i %d", desc="maximum number of iterations" - ) - backup_images = traits.Bool(False, argstr="-b %d") - parallel_control = traits.Enum( - 0, - 1, - 2, - 3, - 4, - 5, - desc="Control for parallel computation " - "0 = run serially, " - "1 = SGE qsub, " - "2 = use PEXEC (localhost), " - "3 = Apple XGrid, " - "4 = PBS qsub, " - "5 = SLURM", - argstr="-c %d", - usedefault=True, - hash_files=False, - ) - num_cores = traits.Int(default=1, usedefault=True, argstr="-j %d", hash_files=False) - num_modalities = traits.Int( - 1, - usedefault=True, - desc="Number of modalities used " - "to construct the template (default 1): For example, " - "if one wanted to create a multimodal template consisting of T1,T2,and FA " - 'components ("-k 3")', - argstr="-k %d", - ) - modality_weights = traits.List([1], usedefault=True) - n4_bias_correct = traits.Bool(True, usedefault=True, argstr="-n %d") - metric = traits.Str("CC", usedefault=True, argstr="-m %s", mandatory=True) - transform = traits.Enum( - "BSplineSyN", "SyN", "Affine", usedefault=True, argstr="-t %s", mandatory=True - ) - output_prefix = traits.Str("antsBTP") - gradient_step = traits.Float(0.25, usedefault=True, mandatory=True, argstr="-g %.3f") - use_full_affine = traits.Bool(False, usedefault=True, argstr="-y %d") - usefloat = traits.Bool(True, argstr="-e %d", usedefault=True) - - -class MultivariateTemplateConstruction2OutputSpec(TraitedSpec): - templates = OutputMultiObject(File(exists=True), mandatory=True) - forward_transforms = OutputMultiObject(OutputMultiObject(File(exists=True)), mandatory=True) - reverse_transforms = OutputMultiObject(OutputMultiObject(File(exists=True)), mandatory=True) - iteration_templates = OutputMultiObject(File(exists=True)) - - -class MultivariateTemplateConstruction2(CommandLine): - input_spec = MultivariateTemplateConstruction2InputSpec - output_spec = MultivariateTemplateConstruction2OutputSpec - _cmd = "antsMultivariateTemplateConstruction2.sh " - - def _format_arg(self, opt, spec, val): - if opt == "input_images": - return " ".join([op.split(fname)[1] for fname in val]) - if opt == "modality_weights": - return "x".join(["%.3f" % weight for weight in val]) - return super(MultivariateTemplateConstruction2, self)._format_arg(opt, spec, val) - - def _list_outputs(self): - if isdefined(self.inputs.input_file): - raise NotImplementedError() - forward_transforms = [] - reverse_transforms = [] - if isdefined(self.inputs.output_prefix): - prefix = self.inputs.output_prefix - else: - prefix = "antsBTP" - cwd = os.getcwd() - for num, input_image in enumerate(self.inputs.input_images): - if isinstance(input_image, list): - input_image = input_image[0] - path, fname, ext = split_filename(input_image) - affine = "%s/%s%s%d0GenericAffine.mat" % (cwd, prefix, fname, num) - warp = "%s/%s%s%d1Warp.nii.gz" % (cwd, prefix, fname, num) - inv_warp = "%s/%s%s%d1InverseWarp.nii.gz" % (cwd, prefix, fname, num) - forward_transforms.append([affine, warp]) - reverse_transforms.append([inv_warp, affine]) - - templates = [ - "%s/%stemplate%s.nii.gz" % (cwd, prefix, tnum) - for tnum in range(self.inputs.num_modalities) - ] - outputs = self.output_spec().get() - outputs["forward_transforms"] = forward_transforms - outputs["reverse_transforms"] = reverse_transforms - outputs["templates"] = templates - - return outputs - - class ImageMathInputSpec(BaseInterfaceInputSpec): in_file = File(exists=True, mandatory=True, position=3, 
argstr="%s") dimension = traits.Enum(3, 2, 4, usedefault=True, argstr="%d", position=0) @@ -190,26 +69,3 @@ class ConvertTransformFile(CommandLine): _cmd = "ConvertTransformFile" input_spec = _ConvertTransformFileInputSpec output_spec = _ConvertTransformFileOutputSpec - - -class _GetImageTypeInputSpec(BaseInterfaceInputSpec): - image = File(exists=True, mandatory=True) - - -class _GetImageTypeOutputSpec(TraitedSpec): - image_type = traits.Enum(0, 1, 2, 3) - - -class GetImageType(SimpleInterface): - """Use to determine what to send to --input-image-type.""" - - input_spec = _GetImageTypeInputSpec - output_spec = _GetImageTypeOutputSpec - - def _run_interface(self, runtime): - img = nb.load(self.inputs.image) - if img.ndim == 4: - self._results["image_type"] = 3 - else: - self._results["image_type"] = 0 - return runtime diff --git a/qsirecon/interfaces/bids.py b/qsirecon/interfaces/bids.py index b94e6b2b..be638ebf 100644 --- a/qsirecon/interfaces/bids.py +++ b/qsirecon/interfaces/bids.py @@ -33,7 +33,6 @@ InputMultiObject, OutputMultiPath, SimpleInterface, - Str, TraitedSpec, isdefined, traits, @@ -78,133 +77,6 @@ def get_bids_params(fullpath): return matches -class FileNotFoundError(IOError): - pass - - -class BIDSInfoInputSpec(BaseInterfaceInputSpec): - in_file = File(mandatory=True, desc="input file, part of a BIDS tree") - - -class BIDSInfoOutputSpec(TraitedSpec): - subject_id = traits.Str() - session_id = traits.Str() - task_id = traits.Str() - acq_id = traits.Str() - rec_id = traits.Str() - run_id = traits.Str() - - -class BIDSInfo(SimpleInterface): - """ - Extract metadata from a BIDS-conforming filename - - This interface uses only the basename, not the path, to determine the - subject, session, task, run, acquisition or reconstruction. - - """ - - input_spec = BIDSInfoInputSpec - output_spec = BIDSInfoOutputSpec - - def _run_interface(self, runtime): - match = BIDS_NAME.search(self.inputs.in_file) - params = match.groupdict() if match is not None else {} - self._results = {key: val for key, val in list(params.items()) if val is not None} - return runtime - - -class BIDSDataGrabberInputSpec(BaseInterfaceInputSpec): - subject_data = traits.Dict(Str, traits.Any) - subject_id = Str() - - -class BIDSDataGrabberOutputSpec(TraitedSpec): - out_dict = traits.Dict(desc="output data structure") - fmap = OutputMultiPath(desc="output fieldmaps") - bold = OutputMultiPath(desc="output functional images") - sbref = OutputMultiPath(desc="output sbrefs") - t1w = OutputMultiPath(desc="output T1w images") - roi = OutputMultiPath(desc="output ROI images") - t2w = OutputMultiPath(desc="output T2w images") - flair = OutputMultiPath(desc="output FLAIR images") - dwi = OutputMultiPath(desc="output DWI images") - - -class BIDSDataGrabber(SimpleInterface): - """ - Collect files from a BIDS directory structure - - >>> from qsirecon.interfaces import BIDSDataGrabber - >>> from qsirecon.utils.bids import collect_data - >>> bids_src = BIDSDataGrabber(anat_only=False) - >>> bids_src.inputs.subject_data = collect_data('ds114', '01')[0] - >>> bids_src.inputs.subject_id = 'ds114' - >>> res = bids_src.run() - >>> res.outputs.t1w # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE - ['.../ds114/sub-01/ses-retest/anat/sub-01_ses-retest_T1w.nii.gz', - '.../ds114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'] - - """ - - input_spec = BIDSDataGrabberInputSpec - output_spec = BIDSDataGrabberOutputSpec - _require_funcs = True - - def __init__(self, *args, **kwargs): - anat_only = kwargs.pop("anat_only") - dwi_only = 
kwargs.pop("dwi_only") - anatomical_contrast = kwargs.pop("anatomical_contrast") - self._anatomical_contrast = anatomical_contrast - super(BIDSDataGrabber, self).__init__(*args, **kwargs) - if anat_only is not None: - self._require_funcs = not anat_only - self._no_anat_necessary = bool(dwi_only) or anatomical_contrast == "none" - - def _run_interface(self, runtime): - bids_dict = self.inputs.subject_data - - self._results["out_dict"] = bids_dict - self._results.update(bids_dict) - - if not bids_dict["t1w"]: - message = "No T1w images found for subject sub-{}".format(self.inputs.subject_id) - if self._no_anat_necessary: - LOGGER.info("%s, but no problem because --dwi-only was selected.", message) - elif self._anatomical_contrast != "T1w": - LOGGER.info( - "%s, but no problem because --anat-modality %s was selected.", - message, - self._anatomical_contrast, - ) - else: - raise FileNotFoundError(message) - - if not bids_dict["t2w"]: - message = "No T2w images found for subject sub-{}".format(self.inputs.subject_id) - if self._no_anat_necessary: - LOGGER.info("%s, but no problem because --dwi-only was selected.", message) - elif self._anatomical_contrast != "T2w": - LOGGER.info( - "%s, but no problem because --anat-modality %s was selected.", - message, - self._anatomical_contrast, - ) - else: - raise FileNotFoundError(message) - - if self._no_anat_necessary and not bids_dict["dwi"]: - raise FileNotFoundError( - "No DWI images found for subject sub-{}".format(self.inputs.subject_id) - ) - - for imtype in ["flair", "fmap", "sbref", "roi", "dwi"]: - if not bids_dict[imtype]: - LOGGER.warning("No '%s' images found for sub-%s", imtype, self.inputs.subject_id) - - return runtime - - class DerivativesDataSinkInputSpec(BaseInterfaceInputSpec): base_directory = traits.Directory(desc="Path to the base directory for storing data.") in_file = traits.Either( @@ -246,7 +118,7 @@ class DerivativesDataSink(SimpleInterface): >>> from pathlib import Path >>> import tempfile - >>> from qsirecon.utils.bids import collect_data + >>> from qsiprep.utils.bids import collect_data >>> tmpdir = Path(tempfile.mkdtemp()) >>> tmpfile = tmpdir / 'a_temp_file.nii.gz' >>> tmpfile.open('w').close() # "touch" the file @@ -346,24 +218,6 @@ def _run_interface(self, runtime): return runtime -class _DerivativesMaybeDataSinkInputSpec(DerivativesDataSinkInputSpec): - in_file = traits.Either( - traits.Directory(exists=True), - InputMultiObject(File(exists=True)), - mandatory=False, - desc="the object to be saved", - ) - - -class DerivativesMaybeDataSink(DerivativesDataSink): - input_spec = _DerivativesMaybeDataSinkInputSpec - - def _run_interface(self, runtime): - if not isdefined(self.inputs.in_file): - return runtime - return super(DerivativesMaybeDataSink, self)._run_interface(runtime) - - recon_entity_order = ["atlas", "model", "bundles", "fit", "mdp", "mfp", "bundle", "label"] diff --git a/qsirecon/interfaces/confounds.py b/qsirecon/interfaces/confounds.py deleted file mode 100644 index 2e6e7cbb..00000000 --- a/qsirecon/interfaces/confounds.py +++ /dev/null @@ -1,260 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Handling confounds -^^^^^^^^^^^^^^^^^^ - - >>> import os - >>> import pandas as pd - -""" -import json -import os -import re - -import numpy as np -import pandas as pd -from nipype import logging -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - InputMultiObject, - SimpleInterface, - 
TraitedSpec, - isdefined, - traits, -) - -from .gradients import concatenate_bvals, concatenate_bvecs -from .niworkflows import dMRIPlot - -LOGGER = logging.getLogger("nipype.interface") - - -class GatherConfoundsInputSpec(BaseInterfaceInputSpec): - fd = File(exists=True, desc="input framewise displacement") - motion = File(exists=True, desc="input motion parameters") - sliceqc_file = File(exists=True, desc="output from sliceqc") - original_files = traits.List(desc="original grouping of each volume") - original_bvecs = InputMultiObject(File(exists=True), desc="original bvec files") - original_bvals = InputMultiObject(File(exists=True), desc="originals bval files") - denoising_confounds = File(exists=True, desc="descriptive statistics from denoising") - - -class GatherConfoundsOutputSpec(TraitedSpec): - confounds_file = File(exists=True, desc="output confounds file") - confounds_list = traits.List(traits.Str, desc="list of headers") - - -class GatherConfounds(SimpleInterface): - """ - Combine various sources of confounds in one TSV file - - """ - - input_spec = GatherConfoundsInputSpec - output_spec = GatherConfoundsOutputSpec - - def _run_interface(self, runtime): - combined_out, confounds_list = _gather_confounds( - fdisp=self.inputs.fd, - sliceqc_file=self.inputs.sliceqc_file, - motion=self.inputs.motion, - original_files=self.inputs.original_files, - original_bvals=concatenate_bvals(self.inputs.original_bvals, None), - original_bvecs=concatenate_bvecs(self.inputs.original_bvecs), - denoising_confounds=self.inputs.denoising_confounds, - newpath=runtime.cwd, - ) - self._results["confounds_file"] = combined_out - self._results["confounds_list"] = confounds_list - return runtime - - -def _gather_confounds( - fdisp=None, - motion=None, - sliceqc_file=None, - newpath=None, - original_files=None, - original_bvals=None, - original_bvecs=None, - denoising_confounds=None, -): - """ - Load confounds from the filenames, concatenate together horizontally - and save new file. - - >>> from tempfile import TemporaryDirectory - >>> tmpdir = TemporaryDirectory() - >>> os.chdir(tmpdir.name) - >>> pd.DataFrame({'FramewiseDisplacement': [0.1]}).to_csv('FD.txt', index=False, na_rep='n/a') - >>> pd.DataFrame({'trans_x': [0.2], 'trans_y': [0.3], 'trans_z': [0.4], - ... 'rot_x': [0.5], 'rot_y': [0.6], 'rot_z': [0.7]}).to_csv( - ... 'spm_motion.tsv', index=False, na_rep='n/a') - >>> out_file, confound_list = _gather_confounds('FD.txt', 'spm_motion.tsv') - >>> confound_list - ['Framewise displacement', 'Motion parameters'] - - >>> pd.read_csv(out_file, sep='\s+', index_col=None, - ... engine='python') # doctest: +NORMALIZE_WHITESPACE - framewise_displacement trans_x,trans_y,trans_z,rot_x,rot_y,rot_z - 0 0.1 0.2,0.3,0.4,0.5,0.6,0.7 - - - """ - - def less_breakable(a_string): - """hardens the string to different envs (i.e. 
case insensitive, no whitespace, '#'""" - return "".join(a_string.split()).strip("#") - - # Taken from https://stackoverflow.com/questions/1175208/ - # If we end up using it more than just here, probably worth pulling in a well-tested package - def camel_to_snake(name): - s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() - - def _adjust_indices(left_df, right_df): - # This forces missing values to appear at the beggining of the DataFrame - # instead of the end - index_diff = len(left_df.index) - len(right_df.index) - if index_diff > 0: - right_df.index = range(index_diff, len(right_df.index) + index_diff) - elif index_diff < 0: - left_df.index = range(-index_diff, len(left_df.index) - index_diff) - - all_files = [] - confounds_list = [] - for confound, name in ((fdisp, "Framewise displacement"), (motion, "Motion parameters")): - if confound is not None and isdefined(confound): - confounds_list.append(name) - if os.path.exists(confound) and os.stat(confound).st_size > 0: - all_files.append(confound) - - confounds_data = pd.DataFrame() - for file_name in all_files: # assumes they all have headings already - new = pd.read_csv(file_name, sep="\t") - for column_name in new.columns: - new.rename( - columns={column_name: camel_to_snake(less_breakable(column_name))}, inplace=True - ) - - _adjust_indices(confounds_data, new) - confounds_data = pd.concat((confounds_data, new), axis=1) - - # Sort the confounds by index so that the remaining columns align properly. - # confounds_data['some_col'] = some_vector doesn't account for the index - # being out of sequence. pd.concat does respect indices, but eventually we - # want to write to csv in sequential order anyway - confounds_data.sort_index(axis=0, inplace=True) - - # Add in the sliceqc measures - if isdefined(sliceqc_file) and sliceqc_file is not None: - if sliceqc_file.endswith(".npz"): - sqc = np.load(sliceqc_file) - confounds_data["hmc_r2"] = sqc["wb_r2s"] - confounds_data["hmc_xcorr"] = sqc["wb_xcorrs"] - confounds_list += ["hmc_r2", "hmc_xcorr"] - else: - sqc = np.loadtxt(sliceqc_file, skiprows=1) - confounds_data["eddy_stdevs"] = sqc.sum(axis=1) - - if newpath is None: - newpath = os.getcwd() - - if original_files is not None and isdefined(original_files): - file_array = np.array([os.path.split(fname)[1] for fname in original_files]) - confounds_data["original_file"] = file_array - confounds_list += ["original_file"] - - if original_bvecs is not None and isdefined(original_bvecs): - confounds_data["grad_x"] = original_bvecs[:, 0] - confounds_data["grad_y"] = original_bvecs[:, 1] - confounds_data["grad_z"] = original_bvecs[:, 2] - - if original_bvals is not None and isdefined(original_bvals): - confounds_data["bval"] = original_bvals - - if denoising_confounds is not None and isdefined(denoising_confounds): - denoising = pd.read_csv(denoising_confounds) - denoising.original_file = denoising.original_file.str.split("/").str[-1] - denoising_check = denoising[["original_bx", "original_by", "original_bz", "original_bval"]] - confound_check = confounds_data[["grad_x", "grad_y", "grad_z", "bval"]] - - # Check that the gradients and original files match after recombining - denoising.to_csv(newpath + "/denoising.csv") - confounds_data.to_csv(newpath + "/confounds_data.csv") - if not np.allclose(denoising_check.to_numpy(), confound_check.to_numpy()): - raise Exception("Gradients don't match. 
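As a quick, runnable illustration of the column-name normalization used by the deleted _gather_confounds (the two helpers are copied verbatim so the demo is self-contained):

    import re

    def less_breakable(a_string):
        # Strip whitespace and leading/trailing '#' so headers survive any environment
        return "".join(a_string.split()).strip("#")

    def camel_to_snake(name):
        s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
        return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()

    print(camel_to_snake(less_breakable("FramewiseDisplacement")))  # framewise_displacement
    print(camel_to_snake(less_breakable("# Trans X ")))             # trans_x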
File a bug report!") - if not denoising["original_file"].eq(confounds_data["original_file"]).all(): - raise Exception("Original files don't match. File a bug report!") - denoising.drop( - columns=[ - "original_file", - "original_bval", - "original_bx", - "original_by", - "original_bz", - ], - inplace=True, - ) - confounds_data = pd.concat([confounds_data, denoising], axis=1) - confounds_list += denoising.columns.to_list() - - combined_out = os.path.join(newpath, "confounds.tsv") - confounds_data.to_csv(combined_out, sep="\t", index=False, na_rep="n/a") - - return combined_out, confounds_list - - -class DMRISummaryInputSpec(BaseInterfaceInputSpec): - confounds_file = File(exists=True, desc="BIDS' _confounds.tsv file") - sliceqc_file = File(exists=True, desc="output from SliceQC") - sliceqc_mask = File(exists=True, desc="Mask") - - str_or_tuple = traits.Either( - traits.Str, - traits.Tuple(traits.Str, traits.Either(None, traits.Str)), - traits.Tuple(traits.Str, traits.Either(None, traits.Str), traits.Either(None, traits.Str)), - ) - confounds_list = traits.List( - str_or_tuple, minlen=1, desc="list of headers to extract from the confounds_file" - ) - bval_files = InputMultiObject(File(exists=True), desc="bvals files") - orig_bvecs = InputMultiObject(File(exists=True), desc="original bvecs file") - - -class DMRISummaryOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") - carpetplot_json = File(exists=True) - - -class DMRISummary(SimpleInterface): - input_spec = DMRISummaryInputSpec - output_spec = DMRISummaryOutputSpec - - def _run_interface(self, runtime): - self._results["out_file"] = os.path.join(runtime.cwd, "dmriplot.svg") - - dataframe = pd.read_csv( - self.inputs.confounds_file, sep="\t", index_col=None, na_filter=True, na_values="n/a" - ) - - plotter = dMRIPlot( - sliceqc_file=self.inputs.sliceqc_file, - mask_file=self.inputs.sliceqc_mask, - confounds=dataframe, - ) - fig = plotter.plot() - fig.savefig(self._results["out_file"], bbox_inches="tight") - - # Write a json file of the carpetplot data - carpetplot_json = os.path.join(runtime.cwd, "carpetplot.json") - with open(carpetplot_json, "w") as carpet_file: - json.dump( - {"carpetplot": np.nan_to_num(plotter.qc_data["slice_scores"]).tolist()}, - carpet_file, - ) - self._results["carpetplot_json"] = carpetplot_json - return runtime diff --git a/qsirecon/interfaces/connectivity.py b/qsirecon/interfaces/connectivity.py deleted file mode 100644 index e7e863f5..00000000 --- a/qsirecon/interfaces/connectivity.py +++ /dev/null @@ -1,73 +0,0 @@ -from __future__ import print_function - -import logging - -import numpy as np -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - SimpleInterface, - TraitedSpec, -) -from nipype.utils.filemanip import fname_presuffix -from scipy.io.matlab import loadmat, savemat -from scipy.linalg import schur, svd - -LOGGER = logging.getLogger("nipype.interface") - - -class ControllabilityInputSpec(BaseInterfaceInputSpec): - matfile = File(exists=True, desc="connectivity matrices in matlab format") - - -class ControllabilityOutputSpec(TraitedSpec): - controllability = File(exists=True, desc="input connectivity data and controllability") - - -class Controllability(SimpleInterface): - input_spec = ControllabilityInputSpec - output_spec = ControllabilityOutputSpec - - def _run_interface(self, runtime): - mat = loadmat(self.inputs.matfile, squeeze_me=True) - outfile = fname_presuffix( - self.inputs.matfile, suffix="_controllability", newpath=runtime.cwd - ) - 
connectivity_info = _calculate_controllability(mat) - LOGGER.info("writing %s", outfile) - savemat(outfile, connectivity_info, do_compression=True) - self._results["controllability"] = outfile - return runtime - - -def ave_control(A): - Anormed = A / (1 + svd(A)[1][0]) # Matrix normalization - T, U = schur(Anormed, "real") # Schur stability - - midMat = (U**2).T - v = np.diag(T) - P = np.column_stack([1 - v * v.T] * A.shape[0]) - return np.sum(midMat / P, axis=0) - - -def modal_control(A): - Anormed = A / (1 + svd(A)[1][0]) # Matrix normalization - T, U = schur(Anormed, "real") # Schur stability - eigVals = np.diag(T) - N = A.shape[0] - phi = np.zeros(N) - - b = 1 - eigVals**2 - U2 = U**2 - for i in range(N): - phi[i] = np.dot(U2[i], b) - return phi - - -def _calculate_controllability(mat): - connectivity_keys = [k for k in mat.keys() if k.endswith("connectivity")] - for key in connectivity_keys: - adjmat = mat[key] - mat[key + "_modal_ctl"] = modal_control(adjmat) - mat[key + "_ave_ctl"] = ave_control(adjmat) - return mat diff --git a/qsirecon/interfaces/dipy.py b/qsirecon/interfaces/dipy.py index c88d2a1c..c8496bb0 100644 --- a/qsirecon/interfaces/dipy.py +++ b/qsirecon/interfaces/dipy.py @@ -9,7 +9,6 @@ """ import shutil -import subprocess import nibabel as nb import numpy as np @@ -18,7 +17,6 @@ from dipy.io.utils import nifti1_symmat from dipy.reconst import dki, dti, mapmri from dipy.segment.mask import median_otsu -from nilearn.image import load_img from nipype import logging from nipype.interfaces.base import ( BaseInterfaceInputSpec, @@ -31,7 +29,6 @@ from nipype.utils.filemanip import fname_presuffix from pkg_resources import resource_filename as pkgr -from .. import config from ..interfaces.mrtrix import _convert_fsl_to_mrtrix from ..utils.brainsuite_shore import BrainSuiteShoreModel, brainsuite_shore_basis from .converters import ( @@ -39,130 +36,11 @@ amplitudes_to_sh_mif, get_dsi_studio_ODF_geometry, ) -from .denoise import ( - SeriesPreprocReport, - SeriesPreprocReportInputSpec, - SeriesPreprocReportOutputSpec, -) -from .patch2self import patch2self LOGGER = logging.getLogger("nipype.interface") TAU_DEFAULT = 1.0 / (4 * np.pi**2) -def popen_run(arg_list): - cmd = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = cmd.communicate() - config.loggers.interface.info(out) - config.loggers.interface.info(err) - - -class Patch2SelfInputSpec(SeriesPreprocReportInputSpec): - in_file = File(exists=True, mandatory=True, desc="4D diffusion MRI data file") - patch_radius = traits.Either(traits.Int(0), traits.Str("auto"), desc="patch radius in voxels.") - bval_file = File(exists=True, mandatory=True, desc="bval file containing b-values") - model = traits.Str("ols", usedefault=True, desc="Regression model for Patch2Self") - alpha = traits.Float(1.0, usedefault=True, desc="Regularization parameter for Ridge and Lasso") - b0_threshold = traits.Float(50.0, usedefault=True, desc="Threshold to segregate b0s") - mask = File(desc="mask image (unused)") - b0_denoising = traits.Bool(True, usedefault=True, desc="denoise the b=0 images too") - clip_negative_vals = traits.Bool( - False, usedefault=True, desc="Sets negative values after denoising to 0" - ) - shift_intensity = traits.Bool( - True, - usedefault=True, - desc="Shifts the distribution of intensities per " "volume to give non-negative values", - ) - out_report = File( - "patch2self_report.svg", usedefault=True, desc="filename for the visual report" - ) - - -class 
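For context, the network-controllability math deleted in this hunk is compact; here is a self-contained sketch of the average-controllability computation, using the same spectral normalization and Schur step as the removed ave_control (the toy matrix is illustrative):

    import numpy as np
    from scipy.linalg import schur, svd

    def ave_control(A):
        # Normalize by the largest singular value, then Schur-decompose
        T, U = schur(A / (1 + svd(A)[1][0]), "real")
        v = np.diag(T)
        P = np.column_stack([1 - v * v.T] * A.shape[0])
        return np.sum((U**2).T / P, axis=0)

    rng = np.random.default_rng(0)
    A = rng.random((10, 10))
    A = (A + A.T) / 2          # toy symmetric "connectivity matrix"
    print(ave_control(A))      # one average-controllability value per node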
Patch2SelfOutputSpec(SeriesPreprocReportOutputSpec): - out_file = File(exists=True, desc="Denoised version of the input image") - noise_image = File(exists=True, desc="Residuals depicting suppressed noise") - - -class Patch2Self(SeriesPreprocReport, SimpleInterface): - input_spec = Patch2SelfInputSpec - output_spec = Patch2SelfOutputSpec - - def _run_interface(self, runtime): - - in_file = self.inputs.in_file - bval_file = self.inputs.bval_file - denoised_file = fname_presuffix( - in_file, suffix="_denoised_patch2self", newpath=runtime.cwd - ) - noise_file = fname_presuffix( - in_file, suffix="_denoised_residuals_patch2self", newpath=runtime.cwd - ) - noisy_img = nb.load(in_file) - noisy_arr = noisy_img.get_fdata() - bvals = np.loadtxt(bval_file) - - # Determine the patch radius - num_non_b0 = (bvals > self.inputs.b0_threshold).sum() - very_few_directions = num_non_b0 < 20 - few_directions = num_non_b0 < 50 - if self.inputs.patch_radius == "auto": - if very_few_directions: - patch_radius = [3, 3, 3] - elif few_directions: - patch_radius = [1, 1, 1] - else: - patch_radius = [0, 0, 0] - else: - patch_radius = [self.inputs.patch_radius] * 3 - if self.inputs.patch_radius > 3 and not very_few_directions: - LOGGER.info( - "a very large patch radius is not necessary when more than " - "20 gradient directions have been sampled." - ) - elif self.inputs.patch_radius > 1 and not few_directions: - LOGGER.info( - "a large patch radius is not necessary when more than " - "50 gradient directions have been sampled." - ) - elif self.inputs.patch_radius == 0 and few_directions: - LOGGER.warning( - "When < 50 gradient directions are available, it is " - "recommended to increase patch_radius to > 0." - ) - - denoised_arr, noise_residuals = patch2self( - noisy_arr, - bvals, - model=self.inputs.model, - alpha=self.inputs.alpha, - patch_radius=patch_radius, - b0_threshold=self.inputs.b0_threshold, - verbose=True, - b0_denoising=self.inputs.b0_denoising, - clip_negative_vals=self.inputs.clip_negative_vals, - shift_intensity=self.inputs.shift_intensity, - ) - - # Back to nifti - denoised_img = nb.Nifti1Image(denoised_arr, noisy_img.affine, noisy_img.header) - p2s_residuals = nb.Nifti1Image(noise_residuals, noisy_img.affine, noisy_img.header) - denoised_img.to_filename(denoised_file) - p2s_residuals.to_filename(noise_file) - self._results["out_file"] = denoised_file - self._results["noise_image"] = noise_file - return runtime - - def _get_plotting_images(self): - input_dwi = load_img(self.inputs.in_file) - outputs = self._list_outputs() - ref_name = outputs.get("out_file") - denoised_nii = load_img(ref_name) - noise_name = outputs["noise_image"] - noisenii = load_img(noise_name) - return input_dwi, denoised_nii, noisenii - - class DipyReconInputSpec(BaseInterfaceInputSpec): bval_file = File(exists=True, mandatory=True) bvec_file = File(exists=True, mandatory=True) diff --git a/qsirecon/interfaces/dsi_studio.py b/qsirecon/interfaces/dsi_studio.py index 0568f8e7..f4c2ec28 100644 --- a/qsirecon/interfaces/dsi_studio.py +++ b/qsirecon/interfaces/dsi_studio.py @@ -5,7 +5,6 @@ from copy import deepcopy from glob import glob from pathlib import Path -from subprocess import PIPE, Popen import nibabel as nb import nipype.interfaces.utility as niu @@ -122,48 +121,6 @@ def _list_outputs(self): return outputs -class _DSIStudioQCOutputSpec(TraitedSpec): - qc_txt = File(exists=True, desc="Text file with QC measures") - - -class DSIStudioQC(SimpleInterface): - output_spec = _DSIStudioQCOutputSpec - - def _run_interface(self, 
runtime): - # DSI Studio (0.12.2) action=qc has two modes, depending on whether the - input is a file (src.gz|nii.gz)|(fib.gz) or a directory. For - directories, the action will be run on a number of detected files - (which *cannot* be symbolic links for some reason). - src_file = fname_presuffix(self.inputs.src_file, newpath=runtime.cwd) - cmd = ["dsi_studio", "--action=qc", "--source=" + src_file] - proc = Popen(cmd, cwd=runtime.cwd, stdout=PIPE, stderr=PIPE) - out, err = proc.communicate() - if out: - LOGGER.info(out.decode()) - if err: - LOGGER.critical(err.decode()) - self._results["qc_txt"] = op.join(runtime.cwd, "qc.txt") - return runtime - - -class _DSIStudioSrcQCInputSpec(DSIStudioCommandLineInputSpec): - src_file = File(exists=True, copyfile=False, argstr="%s", desc="DSI Studio src[.gz] file") - - -class DSIStudioSrcQC(DSIStudioQC): - input_spec = _DSIStudioSrcQCInputSpec - ext = ".src.gz" - - -class _DSIStudioFibQCInputSpec(DSIStudioCommandLineInputSpec): - src_file = File(exists=True, copyfile=False, argstr="%s", desc="DSI Studio fib[.gz] file") - - -class DSIStudioFibQC(DSIStudioQC): - input_spec = _DSIStudioFibQCInputSpec - ext = ".fib.gz" - - # Step 2 reconstruct ODFs class DSIStudioReconstructionInputSpec(DSIStudioCommandLineInputSpec): input_src_file = File( @@ -683,57 +640,6 @@ def _run_interface(self, runtime): return runtime - -class _DSIStudioQCMergeInputSpec(BaseInterfaceInputSpec): - src_qc = File(exists=True, mandatory=True) - fib_qc = File(exists=True, mandatory=True) - - -class _DSIStudioQCMergeOutputSpec(TraitedSpec): - qc_file = File(exists=True) - - -class DSIStudioMergeQC(SimpleInterface): - input_spec = _DSIStudioQCMergeInputSpec - output_spec = _DSIStudioQCMergeOutputSpec - - def _run_interface(self, runtime): - output_csv = runtime.cwd + "/merged_qc.csv" - src_qc = load_src_qc_file(self.inputs.src_qc) - fib_qc = load_fib_qc_file(self.inputs.fib_qc) - src_qc.update(fib_qc) - qc_df = pd.DataFrame(src_qc) - qc_df.to_csv(output_csv, index=False) - self._results["qc_file"] = output_csv - return runtime - - -class _DSIStudioBTableInputSpec(BaseInterfaceInputSpec): - bval_file = File(exists=True, mandatory=True) - bvec_file = File(exists=True, mandatory=True) - bvec_convention = traits.Enum( - ("DIPY", "FSL"), - usedefault=True, - desc="Convention used for bvecs. 
FSL assumes LAS+ no matter image orientation", - ) - - -class _DSIStudioBTableOutputSpec(TraitedSpec): - btable_file = File(exists=True) - - -class DSIStudioBTable(SimpleInterface): - input_spec = _DSIStudioBTableInputSpec - output_spec = _DSIStudioBTableOutputSpec - - def _run_interface(self, runtime): - if self.inputs.bvec_convention != "DIPY": - raise NotImplementedError("Only DIPY Bvecs supported for now") - btab_file = op.join(runtime.cwd, "btable.txt") - btable_from_bvals_bvecs(self.inputs.bval_file, self.inputs.bvec_file, btab_file) - self._results["btable_file"] = btab_file - return runtime - - class _AutoTrackInputSpec(DSIStudioCommandLineInputSpec): fib_file = File(exists=True, mandatory=True, copyfile=False, argstr="--source=%s") map_file = File(exists=True, copyfile=False) @@ -885,53 +791,6 @@ def stat_txt_to_df(stat_txt_file, bundle_name): return bundle_stats -def load_src_qc_file(fname, prefix=""): - with open(fname, "r") as qc_file: - qc_data = qc_file.readlines() - data = qc_data[1] - parts = data.strip().split("\t") - dwi_contrast = np.nan - ndc_masked = np.nan - if len(parts) == 7: - _, dims, voxel_size, dirs, max_b, ndc, bad_slices = parts - elif len(parts) == 8: - _, dims, voxel_size, dirs, max_b, _, ndc, bad_slices = parts - elif len(parts) == 9: - _, dims, voxel_size, dirs, max_b, dwi_contrast, ndc, ndc_masked, bad_slices = parts - else: - raise Exception("Unknown QC File format") - - voxelsx, voxelsy, voxelsz = map(float, voxel_size.strip().split()) - dimx, dimy, dimz = map(float, dims.strip().split()) - n_dirs = float(dirs.split("/")[1]) - max_b = float(max_b) - dwi_corr = float(ndc) - n_bad_slices = float(bad_slices) - ndc_masked = float(ndc_masked) - dwi_contrast = float(dwi_contrast) - data = { - prefix + "dimension_x": [dimx], - prefix + "dimension_y": [dimy], - prefix + "dimension_z": [dimz], - prefix + "voxel_size_x": [voxelsx], - prefix + "voxel_size_y": [voxelsy], - prefix + "voxel_size_z": [voxelsz], - prefix + "max_b": [max_b], - prefix + "neighbor_corr": [dwi_corr], - prefix + "masked_neighbor_corr": [ndc_masked], - prefix + "dwi_contrast": [dwi_contrast], - prefix + "num_bad_slices": [n_bad_slices], - prefix + "num_directions": [n_dirs], - } - return data - - -def load_fib_qc_file(fname): - with open(fname, "r") as fibqc_f: - lines = [line.strip().split() for line in fibqc_f] - return {"coherence_index": [float(lines[1][-1])]} - - def btable_from_bvals_bvecs(bval_file, bvec_file, output_file): """Create a b-table from DIPY-style bvals/bvecs. 
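For readers without the full source: a DSI Studio b-table stores one row per volume, pairing each b-value with its gradient direction. A hedged sketch of such a conversion, assuming a plain-text "bval bx by bz" row layout and an illustrative function name (the real btable_from_bvals_bvecs may differ in formatting details):

    import numpy as np

    def write_btable(bval_file, bvec_file, out_file):
        bvals = np.loadtxt(bval_file)      # shape (N,)
        bvecs = np.loadtxt(bvec_file)      # shape (3, N), DIPY-style text layout
        table = np.column_stack([bvals, bvecs.T])
        np.savetxt(out_file, table, fmt="%.6f")
        return out_file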
diff --git a/qsirecon/interfaces/dwi_merge.py b/qsirecon/interfaces/dwi_merge.py deleted file mode 100644 index fe1b7e5f..00000000 --- a/qsirecon/interfaces/dwi_merge.py +++ /dev/null @@ -1,907 +0,0 @@ -"""Handle merging and spliting of DSI files.""" - -import json -import os.path as op - -import nibabel as nb -import numpy as np -import pandas as pd -from nilearn.image import concat_imgs, index_img, iter_img, load_img, math_img -from nipype import logging -from nipype.interfaces import ants -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - InputMultiObject, - SimpleInterface, - TraitedSpec, - isdefined, - traits, -) -from nipype.utils.filemanip import fname_presuffix - -from ..workflows.dwi.util import _get_concatenated_bids_name -from .fmap import get_distortion_grouping - -LOGGER = logging.getLogger("nipype.workflow") -MAX_COMBINED_SCANS = 100 - - -class MergeDWIsInputSpec(BaseInterfaceInputSpec): - dwi_files = InputMultiObject(File(), mandatory=True, desc="list of dwi files") - bids_dwi_files = InputMultiObject( - File(), mandatory=True, desc="list of original (BIDS) dwi files" - ) - bval_files = InputMultiObject(File(exists=True), mandatory=True, desc="list of bval files") - bvec_files = InputMultiObject(File(exists=True), mandatory=True, desc="list of bvec files") - b0_threshold = traits.Int(100, usedefault=True, desc="Maximum b=0 value") - denoising_confounds = InputMultiObject( - File(exists=True, desc="list of confound files associated with each input dwi") - ) - harmonize_b0_intensities = traits.Bool( - True, usedefault=True, desc="Force scans to have the same mean b=0 intensity" - ) - raw_concatenated_files = InputMultiObject( - File(), mandatory=False, desc="list of raw concatenated images" - ) - b0_refs = InputMultiObject(File(), mandatory=False, desc="list of b=0 reference images") - carpetplot_data = InputMultiObject( - File(exists=True), mandatory=False, desc="list of carpetplot_data files" - ) - scan_metadata = traits.Dict(desc="Dict of metadata for the to-be-combined scans") - - -class MergeDWIsOutputSpec(TraitedSpec): - out_dwi = File(desc="the merged dwi image") - out_bval = File(desc="the merged bval file") - out_bvec = File(desc="the merged bvec file") - original_images = traits.List() - merged_metadata = File(exists=True) - merged_denoising_confounds = File(exists=True) - merged_b0_ref = File(exists=True) - merged_raw_dwi = File(exists=True, mandatory=False) - merged_raw_bvec = File(exists=True, mandatory=False) - merged_carpetplot_data = File(exists=True) - - -class MergeDWIs(SimpleInterface): - input_spec = MergeDWIsInputSpec - output_spec = MergeDWIsOutputSpec - - def _run_interface(self, runtime): - bvals = self.inputs.bval_files - bvecs = self.inputs.bvec_files - num_dwis = len(self.inputs.dwi_files) - - to_concat, b0_means, corrections = harmonize_b0s( - self.inputs.dwi_files, - bvals, - self.inputs.b0_threshold, - self.inputs.harmonize_b0_intensities, - ) - - # Create a merged metadata json file for - if isdefined(self.inputs.scan_metadata): - combined_metadata = combine_metadata( - self.inputs.bids_dwi_files, - self.inputs.scan_metadata, - ) - merged_metadata_file = op.join(runtime.cwd, "merged_metadata.json") - with open(merged_metadata_file, "w") as merged_meta_f: - json.dump(combined_metadata, merged_meta_f, sort_keys=True, indent=4) - self._results["merged_metadata"] = merged_metadata_file - - # Get basic qc / provenance per volume - provenance_df = create_provenance_dataframe( - self.inputs.bids_dwi_files, to_concat, b0_means, 
corrections - ) - - # Collect the confounds - if isdefined(self.inputs.denoising_confounds): - confounds = [pd.read_csv(fname) for fname in self.inputs.denoising_confounds] - _confounds_df = pd.concat(confounds, axis=0, ignore_index=True) - confounds_df = pd.concat([provenance_df, _confounds_df], axis=1, ignore_index=False) - else: - confounds_df = provenance_df - - # Load the gradient information - all_bvals = combined_bval_array(self.inputs.bval_files) - all_bvecs = combined_bvec_array(self.inputs.bvec_files) - confounds_df["original_bval"] = all_bvals - confounds_df["original_bx"] = all_bvecs[0] - confounds_df["original_by"] = all_bvecs[1] - confounds_df["original_bz"] = all_bvecs[2] - confounds_df = confounds_df.loc[:, ~confounds_df.columns.duplicated()] - - # Concatenate the gradient information - if num_dwis > 1: - merged_output = _get_concatenated_bids_name( - {"dwi_series": self.inputs.dwi_files, "fieldmap_info": {"suffix": None}} - ) - merged_fname = op.join(runtime.cwd, merged_output + "_merged.nii.gz") - out_bval = fname_presuffix( - merged_fname, suffix=".bval", use_ext=False, newpath=runtime.cwd - ) - out_bvec = fname_presuffix( - merged_fname, suffix=".bvec", use_ext=False, newpath=runtime.cwd - ) - else: - merged_fname = self.inputs.dwi_files[0] - out_bval = bvals[0] - out_bvec = bvecs[0] - - merged_confounds = fname_presuffix( - merged_fname, suffix="_confounds.csv", use_ext=False, newpath=runtime.cwd - ) - confounds_df = confounds_df.drop("Unnamed: 0", axis=1, errors="ignore") - confounds_df.to_csv(merged_confounds, index=False) - - self._results["merged_denoising_confounds"] = merged_confounds - self._results["original_images"] = confounds_df["original_file"].tolist() - self._results["out_dwi"] = merged_fname - self._results["out_bval"] = out_bval - self._results["out_bvec"] = out_bvec - - # If one and only one carpetplot data was specified, add it to outputs - if len(self.inputs.carpetplot_data) > 1: - raise NotImplementedError("Can't handle multiple carpetplots in merging") - if len(self.inputs.carpetplot_data) == 1: - self._results["merged_carpetplot_data"] = self.inputs.carpetplot_data[0] - - if num_dwis == 1: - return runtime - - # Write the merged gradients - combine_bvals(bvals, output_file=out_bval) - combine_bvecs(bvecs, output_file=out_bvec) - # Concatenate into a single file - merged_nii = concat_imgs(to_concat, auto_resample=True) - # Remove any negative values introduced during interpolation (if it occurs) - pos_merged_nii = math_img("np.clip(img, 0, None)", img=merged_nii) - pos_merged_nii.to_filename(merged_fname) - - return runtime - - -def combine_metadata(scan_list, metadata_dict, merge_method="first"): - """Create a merged metadata dictionary. - - Most importantly, combine the slice timings in some way. - - Parameters - ---------- - scan_list: list - List of BIDS inputs in the order in which they'll be concatenated - metadata_dict: dict - Mapping keys (values in ``scan_list``) to BIDS metadata dictionaries - merge_method: str - How to combine the metadata when multiple scans are being concatenated. - If "first" the metadata from the first scan is selected. Someday other - methods like "average" may be added. 
- - Returns - ------- - metadata: dict - A BIDS metadata dictionary - - """ - if merge_method == "first": - return metadata_dict[scan_list[0]] - raise NotImplementedError(f"merge_method '{merge_method}' is not implemented") - - -class AveragePEPairsInputSpec(MergeDWIsInputSpec): - original_bvec_files = InputMultiObject( - File(exists=True), mandatory=True, desc="list of original bvec files" - ) - carpetplot_data = InputMultiObject( - File(exists=True), mandatory=True, desc="list of carpetplot_data files" - ) - verbose = traits.Bool(False, usedefault=True) - - -class AveragePEPairsOutputSpec(MergeDWIsOutputSpec): - merged_raw_concatenated = File(exists=True) - - -class AveragePEPairs(SimpleInterface): - input_spec = AveragePEPairsInputSpec - output_spec = AveragePEPairsOutputSpec - - def _run_interface(self, runtime): - distortion_groups, assignments = get_distortion_grouping(self.inputs.bids_dwi_files) - num_distortion_groups = len(distortion_groups) - if not num_distortion_groups == 2: - raise Exception( - "Unable to merge using strategy 'average': exactly" - " two distortion groups must be present in data." - " Found %d" % num_distortion_groups - ) - - # Get the gradient info for each PE group - original_bvecs = combined_bvec_array(self.inputs.original_bvec_files) - rotated_bvecs = combined_bvec_array(self.inputs.bvec_files) - bvals = combined_bval_array(self.inputs.bval_files) - - # Find which images should be averaged together in the o - # Also, average the carpetplot matrices and motion params - image_pairs, averaged_raw_bvec = find_image_pairs(original_bvecs, bvals, assignments) - ( - combined_images, - combined_raw_images, - combined_bvals, - combined_bvecs, - error_report, - avg_carpetplot, - ) = average_image_pairs( - image_pairs, - self.inputs.dwi_files, - rotated_bvecs, - bvals, - self.inputs.denoising_confounds, - self.inputs.raw_concatenated_files, - self.inputs.carpetplot_data, - verbose=self.inputs.verbose, - ) - - # Save the averaged outputs - out_dwi_path = op.join(runtime.cwd, "averaged_pairs.nii.gz") - combined_images.to_filename(out_dwi_path) - self._results["out_dwi"] = out_dwi_path - out_bval_path = op.join(runtime.cwd, "averaged_pairs.bval") - self._results["out_bval"] = combine_bvals(combined_bvals, out_bval_path) - out_bvec_path = op.join(runtime.cwd, "averaged_pairs.bvec") - self._results["out_bvec"] = combine_bvecs(combined_bvecs, out_bvec_path) - out_confounds_path = op.join(runtime.cwd, "averaged_pairs_confounds.tsv") - error_report.to_csv(out_confounds_path, index=False, sep="\t") - self._results["merged_denoising_confounds"] = out_confounds_path - self._results["original_images"] = self.inputs.bids_dwi_files - - # Write the merged carpetplot data - out_carpetplot_path = op.join(runtime.cwd, "merged_carpetplot.json") - with open(out_carpetplot_path, "w") as carpet_f: - json.dump(avg_carpetplot, carpet_f) - self._results["merged_carpetplot_data"] = out_carpetplot_path - - # write the averaged raw data - out_raw_concatenated = op.join(runtime.cwd, "merged_raw.nii.gz") - self._results["merged_raw_dwi"] = out_raw_concatenated - combined_raw_images.to_filename(out_raw_concatenated) - out_raw_bvec = op.join(runtime.cwd, "merged_raw.bvec") - self._results["merged_raw_bvec"] = combine_bvecs(averaged_raw_bvec, out_raw_bvec) - - # Make a new b=0 template - b0_indices = np.flatnonzero(bvals < self.inputs.b0_threshold) - b0_ref = ants.AverageImages( - dimension=3, normalize=True, images=[self.inputs.dwi_files[idx] for idx in b0_indices] - ) - result = b0_ref.run() - 
self._results["merged_b0_ref"] = result.outputs.output_average_image - - return runtime - - -class _SplitResampledDWIsInputSpec(BaseInterfaceInputSpec): - dwi_file = File(exists=True, mandatory=True) - bval_file = File(exists=True, mandatory=True) - bvec_file = File(exists=True, mandatory=True) - confounds = File(exists=True, mandatory=True) - n_images = traits.Int(1) - - -class _SplitResampledDWIsOutputSpec(TraitedSpec): - pass - - -# Add slots for the possibly -for subscan in np.arange(MAX_COMBINED_SCANS) + 1: - _SplitResampledDWIsOutputSpec.add_class_trait("dwi_file_%d" % subscan, File(exists=True)) - _SplitResampledDWIsOutputSpec.add_class_trait("bval_file_%d" % subscan, File(exists=True)) - _SplitResampledDWIsOutputSpec.add_class_trait("bvec_file_%d" % subscan, File(exists=True)) - _SplitResampledDWIsOutputSpec.add_class_trait("source_file_%d" % subscan, traits.Str()) - - -class SplitResampledDWIs(SimpleInterface): - input_spec = _SplitResampledDWIsInputSpec - output_spec = _SplitResampledDWIsOutputSpec - - def _run_interface(self, runtime): - # Load the confounds - confounds_df = pd.read_csv(self.inputs.confounds, sep="\t") - original_files = confounds_df["original_file"].unique().tolist() - if not len(original_files) == self.inputs.n_images: - raise Exception( - "Found %d files in confounds file, but expected %d" - % (len(original_files), self.inputs.n_images) - ) - resampled_img = load_img(self.inputs.dwi_file) - for file_num, original_file in enumerate(original_files, start=1): - image_indices = np.flatnonzero( - (confounds_df["original_file"] == original_file).to_numpy() - ) - dwi_subfile = fname_presuffix( - original_file, - prefix="resampled_", - suffix=".nii.gz", - use_ext=False, - newpath=runtime.cwd, - ) - bval_subfile = dwi_subfile.replace(".nii.gz", ".bval") - bvec_subfile = dwi_subfile.replace(".nii.gz", ".bvec") - index_img(resampled_img, image_indices).to_filename(dwi_subfile) - subset_bvals(self.inputs.bval_file, image_indices, bval_subfile) - subset_bvecs(self.inputs.bvec_file, image_indices, bvec_subfile) - - self._results["dwi_file_%d" % file_num] = dwi_subfile - self._results["bval_file_%d" % file_num] = bval_subfile - self._results["bvec_file_%d" % file_num] = bvec_subfile - self._results["source_file_%d" % file_num] = original_file - return runtime - - -class _MergeFinalConfoundsInputSpec(BaseInterfaceInputSpec): - confounds = File(exists=True, mandatory=True) - bias_correction_confounds = InputMultiObject(File(exists=True), mandatory=False) - patch2self_correction_confounds = File(exists=True, mandatory=False) - - -class _MergeFinalConfoundsOutputSpec(TraitedSpec): - confounds = File(exists=True) - - -class MergeFinalConfounds(SimpleInterface): - input_spec = _MergeFinalConfoundsInputSpec - output_spec = _MergeFinalConfoundsOutputSpec - - def _run_interface(self, runtime): - - to_concat_horizontally = [] - # New confounds from bias correction - if isdefined(self.inputs.bias_correction_confounds): - # There may be multuple files that need to be vertically stacked - biascorrection_df = pd.concat( - [pd.read_csv(bc_csv) for bc_csv in self.inputs.bias_correction_confounds], - axis=0, - ignore_index=True, - ) - to_concat_horizontally.append(biascorrection_df) - # New confounds from patch2self - if isdefined(self.inputs.patch2self_correction_confounds): - to_concat_horizontally.append(pd.read_csv(self.inputs.patch2self_correction_confounds)) - - # If we have new ones, append the columns, prefixed by "final_" - if to_concat_horizontally: - new_confounds_file = 
fname_presuffix( - self.inputs.confounds, newpath=runtime.cwd, prefix="final_" - ) - original_confounds = pd.read_csv(self.inputs.confounds, sep="\t") - extra_confounds = pd.concat(to_concat_horizontally, axis=1) - extra_confounds.columns = ["final_" + col for col in extra_confounds.columns.tolist()] - final_confounds = pd.concat([original_confounds, extra_confounds], axis=1) - final_confounds.to_csv(new_confounds_file, sep="\t", index=False) - self._results["confounds"] = new_confounds_file - else: - self._results["confounds"] = self.inputs.confounds - - return runtime - - -def find_image_pairs(original_bvecs, bvals, assignments): - assignments = np.array(assignments) - group1_mask = assignments == 1 - group2_mask = assignments == 2 - image_nums = np.arange(len(assignments)) - group1 = { - "bvals": bvals[group1_mask], - "original_bvecs": original_bvecs[:, group1_mask], - "indices": image_nums[group1_mask], - } - group2 = { - "bvals": bvals[group2_mask], - "original_bvecs": original_bvecs[:, group2_mask], - "indices": image_nums[group2_mask], - } - - # If this is HCP-style, the bvals and bvecs will match directly - if not group2["bvals"].shape == group1["bvals"].shape: - raise Exception("Unable to perform HCP-style merge, different numbers of images") - if np.allclose(group2["bvals"], group1["bvals"], atol=50) and np.allclose( - group2["original_bvecs"], group1["original_bvecs"], atol=0.0001 - ): - pairs = list(zip(group1["indices"], group2["indices"])) - bvecs = group1["original_bvecs"] - else: - raise Exception("Bvecs do not match - ensure matching bvecs") - - return pairs, bvecs - - -def unit_vector(vector): - """Returns the unit vector of the vector.""" - return vector / np.linalg.norm(vector) - - -def angle_between(v1, v2): - """Returns the angle in degrees between vectors 'v1' and 'v2':: - >>> angle_between((1, 0, 0), (0, 1, 0)) - 90.0 - >>> angle_between((1, 0, 0), (1, 0, 0)) - 0.0 - >>> angle_between((1, 0, 0), (-1, 0, 0)) - 180.0 - """ - v1_u = unit_vector(v1) - v2_u = unit_vector(v2) - return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) * 180 / np.pi - - -def average_image_pairs( - image_pairs, - image_paths, - rotated_bvecs, - bvals, - confounds_tsvs, - raw_concatenated_files, - carpetplots, - verbose=False, -): - """Create 4D series of averaged images, gradients, and confounds""" - averaged_images = [] - new_bvecs = [] - confounds = pd.concat([pd.read_csv(fname, delimiter="\t") for fname in confounds_tsvs]) - merged_confounds = [] - merged_bvals = [] - - # Load the raw concatenated images for qc - raw_concatenated_img = concat_imgs(raw_concatenated_files) - raw_averaged_images = [] - - confounds1_rename = {col: col + "_1" for col in confounds.columns} - confounds2_rename = {col: col + "_2" for col in confounds.columns} - for index1, index2 in image_pairs: - confounds1 = confounds.iloc[index1].copy().rename(confounds1_rename) - confounds2 = confounds.iloc[index2].copy().rename(confounds2_rename) - # Make a single row containing both 1 and 2 - confounds_both = confounds1.append(confounds2) - averaged_images.append(math_img("(a+b)/2", a=image_paths[index1], b=image_paths[index2])) - raw_averaged_images.append( - math_img("(a[..., %d] + a[..., %d]) / 2" % (index1, index2), a=raw_concatenated_img) - ) - - new_bval = (bvals[index1] + bvals[index2]) / 2.0 - merged_bvals.append(new_bval) - rotated1 = rotated_bvecs[:, index1] - rotated2 = rotated_bvecs[:, index2] - new_bvec, bvec_error = average_bvec(rotated1, rotated2) - new_bvecs.append(new_bvec) - - 
confounds_both["vec_averaging_error"] = bvec_error - confounds_both["rotated_grad_x_1"] = rotated1[0] - confounds_both["rotated_grad_y_1"] = rotated1[1] - confounds_both["rotated_grad_z_1"] = rotated1[2] - confounds_both["rotated_grad_x_2"] = rotated2[0] - confounds_both["rotated_grad_y_2"] = rotated2[1] - confounds_both["rotated_grad_z_2"] = rotated2[2] - confounds_both["grad_x"] = new_bvec[0] - confounds_both["grad_y"] = new_bvec[1] - confounds_both["grad_z"] = new_bvec[2] - confounds_both["bval"] = new_bval - merged_confounds.append(confounds_both) - if verbose: - print( - "%d: %d [%.4fdeg error]\n\t%d (%.4f %.4f %.4f)" - % (index1, index2, bvec_error, new_bval, new_bvec[0], new_bvec[1], new_bvec[2]) - ) - - # Make columns that can be used in the interactive report - averaged_confounds = pd.DataFrame(merged_confounds) - needed_for_interactive_report = [ - "trans_x", - "trans_y", - "trans_z", - "rot_x", - "rot_y", - "rot_z", - "framewise_displacement", - ] - for key in needed_for_interactive_report: - confs1, confs2 = averaged_confounds[[key + "_1", key + "_2"]].to_numpy().T - averaged_confounds[key] = get_worst(confs1, confs2) - - # Original file is actually two files! - averaged_confounds["original_file"] = averaged_confounds[ - ["original_file_1", "original_file_2"] - ].agg("+".join, axis=1) - - # Get the averaged carpetplot data for the interactive report - averaged_carpetplot = average_carpetplots(carpetplots, np.array(image_pairs)) - return ( - concat_imgs(averaged_images), - concat_imgs(raw_averaged_images), - np.array(merged_bvals), - np.array(new_bvecs), - averaged_confounds, - averaged_carpetplot, - ) - - -def get_worst(values1, values2): - """finds the highest magnitude value per index in values1, values2""" - values = np.column_stack([values1, values2]) - highest_index = np.argmax(np.abs(values), axis=1) - return values[np.arange(values.shape[0]), highest_index] - - -def average_carpetplots(carpet_list, image_pairs): - """Averages carpetplot data for display when pe pairs are averaged. - - Reminder: incoming data is a dict of - {"carpetplot": [[one image's slice scores], - [next image's slice scores], - ... - [last image's slice scores]]} - and the image_pairs should be a n x 2 matrix where the columns - are the first image index and second image index. 
- - """ - if not isinstance(carpet_list, list) and len(carpet_list) == 1: - raise Exception("Not implemented for SHORELine") - carpet_path = carpet_list[0] - with open(carpet_path, "r") as carpet_f: - carpet_dict = json.load(carpet_f) - carpet_data = np.array(carpet_dict["carpetplot"]) - worst_rows = [] - for index1, index2 in image_pairs: - worst_rows.append(get_worst(carpet_data[index1], carpet_data[index2]).tolist()) - return {"carpetplot": worst_rows} - - -def average_bvec(bvec1, bvec2): - # return straight away if the bvecs are identical - # This prevents comparison of zero vectors - if (bvec1 == bvec2).all(): - return np.copy(bvec1), 0.0 - - bvec_diff = angle_between(bvec1, bvec2) - - mean_bvec_plus = (bvec1 + bvec2) / 2.0 - mean_bvec_plus = mean_bvec_plus / np.linalg.norm(mean_bvec_plus) - mean_bvec_minus = (bvec1 - bvec2) / 2.0 - mean_bvec_minus = mean_bvec_minus / np.linalg.norm(mean_bvec_minus) - - if angle_between(bvec1, mean_bvec_plus) < angle_between( - bvec1, mean_bvec_minus - ) and angle_between(bvec2, mean_bvec_plus) < angle_between(bvec2, mean_bvec_minus): - return mean_bvec_plus, bvec_diff - if angle_between(bvec1, mean_bvec_plus) > angle_between( - bvec1, mean_bvec_minus - ) and angle_between(bvec2, mean_bvec_plus) < angle_between(bvec2, mean_bvec_minus): - return mean_bvec_minus, bvec_diff - LOGGER.warning("Ambiguous direcions of vectors: assuming plus") - return mean_bvec_plus, bvec_diff - - -class StackConfoundsInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiObject(File(exists=True), mandatory=True) - axis = traits.Enum(0, 1, default=0, usedefault=True) - out_file = File() - - -class StackConfoundsOutputSpec(TraitedSpec): - confounds_file = File(desc="the stacked confound data") - - -class StackConfounds(SimpleInterface): - input_spec = StackConfoundsInputSpec - output_spec = StackConfoundsOutputSpec - - def _run_interface(self, runtime): - if not self.inputs.in_files: - return runtime - dfs = [pd.read_csv(fname) for fname in self.inputs.in_files] - stacked = pd.concat(dfs, axis=self.inputs.axis, ignore_index=self.inputs.axis == 0) - out_file = op.join(runtime.cwd, "confounds.csv") - stacked = stacked.drop("Unnamed: 0", axis=1, errors="ignore") - stacked.to_csv(out_file) - self._results["confounds_file"] = out_file - return runtime - - -def subset_bvals(bval_file, indices, out_bval_file): - original_bvals = np.loadtxt(bval_file) - bval_subset = original_bvals[indices] - np.savetxt(out_bval_file, bval_subset, fmt=str("%i")) - - -def subset_bvecs(bvec_file, indices, out_bvec_file): - original_bvecs = np.loadtxt(bvec_file) - bvec_subset = original_bvecs[:, indices] - np.savetxt(out_bvec_file, bvec_subset, fmt=str("%.8f")) - - -def combined_bval_array(bval_files): - collected_vals = [] - for bval_file in bval_files: - if isinstance(bval_file, str): - collected_vals.append(np.atleast_1d(np.loadtxt(bval_file))) - else: - collected_vals.append(np.atleast_1d(bval_file)) - return np.concatenate(collected_vals) - - -def combine_bvals(bvals, output_file="restacked.bval"): - """Load, merge and save fsl-style bvals files.""" - final_bvals = combined_bval_array(bvals) - np.savetxt(output_file, final_bvals, fmt=str("%i")) - return op.abspath(output_file) - - -def combined_bvec_array(bvec_files): - collected_vecs = [] - for bvec_file in bvec_files: - if isinstance(bvec_file, str): - collected_vecs.append(np.loadtxt(bvec_file)) - else: - collected_vecs.append(bvec_file) - return np.column_stack(collected_vecs) - - -def combine_bvecs(bvecs, output_file="restacked.bvec"): - 
"""Load, merge and save fsl-style bvecs files.""" - final_bvecs = combined_bvec_array(bvecs) - np.savetxt(output_file, final_bvecs, fmt=str("%.8f")) - return op.abspath(output_file) - - -def get_nvols(img): - """Returns the number of volumes in a 3/4D nifti file.""" - shape = img.shape - if len(shape) < 4: - return 1 - return shape[3] - - -def harmonize_b0s(dwi_files, bvals, b0_threshold, harmonize_b0s): - """Find the mean intensity of b=0 images in a dwi file and calculate corrections. - - Parameters - ---------- - - dwi_files: list - List of paths to dwi Nifti files that will be concatenated - bvals: list - List of paths to bval files corresponding to the files in ``dwi_files`` - b0_threshold: int - maximum b values for an image to be considered a b=0 - harmonize_b0s: bool - Apply a correction to each image so that their mean b=0 images are equal - - Returns - ------- - to_concat: list - List of NiftiImage objects to be concatenated. May have been harmonized. - Same length as the input ``dwi_files``. - corrections: list - The correction that would be applied to each image to harmonize their b=0's. - Same length as the input ``dwi_files``. - - """ - # Load the dwi data and get the mean values from the b=0 images - num_dwis = len(dwi_files) - dwi_niis = [] - b0_means = [] - for dwi_file, bval_file in zip(dwi_files, bvals): - dwi_nii = load_img(dwi_file) - _bvals = np.loadtxt(bval_file) - b0_indices = np.flatnonzero(_bvals < b0_threshold) - if b0_indices.size == 0: - b0_mean = np.nan - else: - if len(b0_indices) > 1: - b0_mean = index_img(dwi_nii, b0_indices).get_fdata().mean() - else: - b0_mean = dwi_nii.get_fdata().mean() - b0_means.append(b0_mean) - dwi_niis.append(dwi_nii) - - # Apply the b0 harmonization if requested - if harmonize_b0s: - b0_all_mean = np.nanmean(b0_means) - corrections = b0_all_mean / np.array(b0_means) - harmonized_niis = [] - for nii_img, correction in zip(dwi_niis, corrections): - if np.isnan(b0_mean): - harmonized_niis.append(nii_img) - LOGGER.warning("An image has no b=0 images and cannot be harmonized") - else: - harmonized_niis.append(math_img("img*%.32f" % correction, img=nii_img)) - to_concat = harmonized_niis - else: - to_concat = dwi_niis - corrections = np.ones(num_dwis) - - return to_concat, b0_means, corrections - - -def create_provenance_dataframe( - bids_sources, harmonized_niis, b0_means, harmonization_corrections -): - series_confounds = [] - nvols_per_image = [get_nvols(img) for img in harmonized_niis] - total_vols = np.sum(nvols_per_image) - # Check whether the bids sources are per file or per volume - if not len(bids_sources) == total_vols: - images_per_volume = [] - for source_image, img_nvols in zip(bids_sources, nvols_per_image): - images_per_volume.extend([source_image] * img_nvols) - if not len(images_per_volume) == total_vols: - raise Exception("Mismatch in number of images and BIDS sources") - bids_sources = images_per_volume - - for correction, harmonized_nii, b0_mean, nvols in zip( - harmonization_corrections, harmonized_niis, b0_means, nvols_per_image - ): - series_confounds.append( - pd.DataFrame( - { - "image_mean": [img.get_fdata().mean() for img in iter_img(harmonized_nii)], - "series_b0_mean": [b0_mean] * nvols, - "series_b0_correction": [correction] * nvols, - } - ) - ) - - image_df = pd.concat(series_confounds, axis=0, ignore_index=True) - image_df["original_file"] = bids_sources - return image_df - - -class _PhaseToRadInputSpec(BaseInterfaceInputSpec): - """Output spec for PhaseToRad interface. 
- - STATEMENT OF CHANGES: This class is derived from sources licensed under the Apache-2.0 terms, - and the code has been changed. - - Notes - ----- - The code is derived from - https://github.com/nipreps/sdcflows/blob/c6cd42944f4b6d638716ce020ffe51010e9eb58a/\ - sdcflows/utils/phasemanip.py#L26. - - License - ------- - ORIGINAL WORK'S ATTRIBUTION NOTICE: - - Copyright 2021 The NiPreps Developers - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - We support and encourage derived works from this project, please read - about our expectations at - - https://www.nipreps.org/community/licensing/ - - """ - - phase_file = File(exists=True, mandatory=True) - - -class _PhaseToRadOutputSpec(TraitedSpec): - """Output spec for PhaseToRad interface. - - STATEMENT OF CHANGES: This class is derived from sources licensed under the Apache-2.0 terms, - and the code has been changed. - - Notes - ----- - The code is derived from - https://github.com/nipreps/sdcflows/blob/c6cd42944f4b6d638716ce020ffe51010e9eb58a/\ - sdcflows/utils/phasemanip.py#L26. - - License - ------- - ORIGINAL WORK'S ATTRIBUTION NOTICE: - - Copyright 2021 The NiPreps Developers - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - We support and encourage derived works from this project, please read - about our expectations at - - https://www.nipreps.org/community/licensing/ - - """ - - phase_file = File(exists=True) - - -class PhaseToRad(SimpleInterface): - """Convert phase image from arbitrary units (au) to radians. - - This method assumes that the phase image's minimum and maximum values correspond to - -pi and pi, respectively, and scales the image to be between 0 and 2*pi. - - STATEMENT OF CHANGES: This class is derived from sources licensed under the Apache-2.0 terms, - and the code has not been changed. - - Notes - ----- - The code is derived from - https://github.com/nipreps/sdcflows/blob/c6cd42944f4b6d638716ce020ffe51010e9eb58a/\ - sdcflows/utils/phasemanip.py#L26. - - License - ------- - ORIGINAL WORK'S ATTRIBUTION NOTICE: - - Copyright 2021 The NiPreps Developers - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - - We support and encourage derived works from this project, please read - about our expectations at - - https://www.nipreps.org/community/licensing/ - - """ - - input_spec = _PhaseToRadInputSpec - output_spec = _PhaseToRadOutputSpec - - def _run_interface(self, runtime): - im = nb.load(self.inputs.phase_file) - data = im.get_fdata(caching="unchanged") # Read as float64 for safety - hdr = im.header.copy() - - # Rescale to [0, 2*pi] - data = (data - data.min()) * (2 * np.pi / (data.max() - data.min())) - - # Round to float32 and clip - data = np.clip(np.float32(data), 0.0, 2 * np.pi) - - hdr.set_data_dtype(np.float32) - hdr.set_xyzt_units("mm") - - # Set the output file name - self._results["phase_file"] = fname_presuffix( - self.inputs.phase_file, - suffix="_rad.nii.gz", - newpath=runtime.cwd, - use_ext=False, - ) - - # Save the output image - nb.Nifti1Image(data, None, hdr).to_filename(self._results["phase_file"]) - - return runtime diff --git a/qsirecon/interfaces/eddy.py b/qsirecon/interfaces/eddy.py deleted file mode 100644 index e5a2e77a..00000000 --- a/qsirecon/interfaces/eddy.py +++ /dev/null @@ -1,466 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Prepare files for TOPUP and eddy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -""" -import json -import os -import os.path as op - -import nibabel as nb -import numpy as np -from nilearn.image import index_img -from nipype import logging -from nipype.interfaces import fsl -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - InputMultiObject, - SimpleInterface, - TraitedSpec, - isdefined, - traits, -) -from nipype.utils.filemanip import fname_presuffix, split_filename -from pkg_resources import resource_filename as pkgr_fn - -from .epi_fmap import get_best_b0_topup_inputs_from -from .fmap import eddy_inputs_from_dwi_files - -LOGGER = logging.getLogger("nipype.interface") - - -class GatherEddyInputsInputSpec(BaseInterfaceInputSpec): - dwi_file = File(exists=True) - bval_file = File(exists=True) - bvec_file = File(exists=True) - b0_threshold = traits.CInt(100, usedefault=True) - original_files = InputMultiObject(File(exists=True)) - epi_fmaps = InputMultiObject( - File(exists=True), desc="files from fmaps/ for distortion correction" - ) - topup_max_b0s_per_spec = traits.CInt(1, usedefault=True) - topup_requested = traits.Bool(False, usedefault=True) - raw_image_sdc = traits.Bool(True, usedefault=True) - eddy_config = File(exists=True, mandatory=True) - json_file = File(exists=True) - - -class GatherEddyInputsOutputSpec(TraitedSpec): - topup_datain = File(exists=True) - topup_imain = File(exists=True) - topup_first = File(exists=True) - topup_config = traits.Str() - pre_topup_image = File(exists=True) - eddy_acqp = File(exists=True) - eddy_first = File(exists=True) - b0_csv = File(exists=True) - eddy_indices = File(exists=True) - forward_transforms = traits.List() - forward_warps = traits.List() - topup_report = traits.Str(desc="description of where data came from") - json_file = File(exists=True) - multiband_factor = traits.Int() - - -class GatherEddyInputs(SimpleInterface): - """Manually prepare inputs for TOPUP and eddy. 
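    A compact picture of what this interface assembles (an illustrative
    sketch, not literal file contents)::

        topup_datain -> one PE/readout line per selected b=0, e.g. "0 -1 0 0.087000"
        topup_imain  -> 4D NIfTI stacking those selected b=0 volumes
        eddy_acqp    -> one line per distortion group
        eddy_indices -> one group number per DWI volume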
-
-    **Inputs**
-        dwi_file: str
-            path to a 4d DWI nifti file
-        bval_file: str
-            path to the bval file
-        bvec_file: str
-            path to the bvec file
-        epi_fmaps: list
-            paths to files from fmap/ (3D or 4D) containing b=0 images with the
-            reverse PE direction
-    """
-
-    input_spec = GatherEddyInputsInputSpec
-    output_spec = GatherEddyInputsOutputSpec
-
-    def _run_interface(self, runtime):
-
-        # Gather inputs for TOPUP
-        topup_prefix = op.join(runtime.cwd, "topup_")
-        topup_datain_file, topup_imain_file, topup_text, b0_csv, topup0, eddy0 = (
-            get_best_b0_topup_inputs_from(
-                dwi_file=self.inputs.dwi_file,
-                bval_file=self.inputs.bval_file,
-                b0_threshold=self.inputs.b0_threshold,
-                cwd=runtime.cwd,
-                bids_origin_files=self.inputs.original_files,
-                epi_fmaps=self.inputs.epi_fmaps,
-                max_per_spec=self.inputs.topup_max_b0s_per_spec,
-                topup_requested=self.inputs.topup_requested,
-                raw_image_sdc=self.inputs.raw_image_sdc,
-            )
-        )
-        self._results["topup_datain"] = topup_datain_file
-        self._results["topup_imain"] = topup_imain_file
-        self._results["topup_report"] = topup_text
-        self._results["b0_csv"] = b0_csv
-        self._results["topup_first"] = topup0
-        self._results["eddy_first"] = eddy0
-
-        # If any axis has an odd number of slices, use b02b0_1.cnf
-        example_b0 = nb.load(self.inputs.dwi_file)
-        self._results["topup_config"] = "b02b0.cnf"
-        if 1 in (example_b0.shape[0] % 2, example_b0.shape[1] % 2, example_b0.shape[2] % 2):
-            LOGGER.warning("Using slower b02b0_1.cnf because an axis has an odd number of slices")
-            self._results["topup_config"] = pkgr_fn("qsirecon.data", "b02b0_1.cnf")
-
-        # For the apply topup report:
-        pre_topup_image = index_img(topup_imain_file, 0)
-        pre_topup_image_file = topup_prefix + "pre_image.nii.gz"
-        pre_topup_image.to_filename(pre_topup_image_file)
-        self._results["pre_topup_image"] = pre_topup_image_file
-
-        # Gather inputs for eddy
-        eddy_prefix = op.join(runtime.cwd, "eddy_")
-        acqp_file, index_file = eddy_inputs_from_dwi_files(self.inputs.original_files, eddy_prefix)
-        self._results["eddy_acqp"] = acqp_file
-        self._results["eddy_indices"] = index_file
-
-        # these have already had HMC, SDC applied
-        self._results["forward_transforms"] = []
-        self._results["forward_warps"] = []
-
-        # Based on the eddy config, determine whether to send a json argument
-        with open(self.inputs.eddy_config, "r") as eddy_cfg_f:
-            eddy_config = json.load(eddy_cfg_f)
-        # json file is only allowed if mporder is defined
-        if "mporder" in eddy_config:
-            self._results["json_file"] = self.inputs.json_file
-
-        return runtime
-
-
-class ExtendedEddyInputSpec(fsl.epi.EddyInputSpec):
-    num_threads = traits.Int(1, usedefault=True, argstr="--nthr=%d")
-
-
-class ExtendedEddyOutputSpec(fsl.epi.EddyOutputSpec):
-    shell_PE_translation_parameters = File(
-        exists=True, desc=("the translation along the PE-direction between the different shells")
-    )
-    outlier_map = File(
-        exists=True,
-        desc="All numbers are either 0, meaning that scan-slice "
-        "is not an outlier, or 1 meaning that it is.",
-    )
-    outlier_n_stdev_map = File(
-        exists=True,
-        desc="how many standard deviations off the mean difference "
-        "between observation and prediction is.",
-    )
-    outlier_n_sqr_stdev_map = File(
-        exists=True,
-        desc="how many standard deviations off the square root of the "
-        "mean squared difference between observation and prediction is.",
-    )
-    outlier_free_data = File(
-        exists=True,
-        desc="the original data given by --imain not corrected for "
-        "susceptibility or EC-induced distortions or subject movement, but with "
-        "outlier slices replaced by the Gaussian Process predictions.",
-    )
-
-
-class ExtendedEddy(fsl.Eddy):
-    input_spec = ExtendedEddyInputSpec
-    output_spec = ExtendedEddyOutputSpec
-
-    def __init__(self, **inputs):
-        super(ExtendedEddy, self).__init__(**inputs)
-        self.inputs.on_trait_change(self._use_cuda, "use_cuda")
-        if isdefined(self.inputs.use_cuda):
-            self._use_cuda()
-
-    def _use_cuda(self):
-        self._cmd = "eddy_cuda10.2" if self.inputs.use_cuda else "eddy_cpu"
-
-    def _list_outputs(self):
-        outputs = self.output_spec().get()
-        outputs["out_corrected"] = os.path.abspath("%s.nii.gz" % self.inputs.out_base)
-        outputs["out_parameter"] = os.path.abspath("%s.eddy_parameters" % self.inputs.out_base)
-
-        # File generation might depend on the version of EDDY
-        out_rotated_bvecs = os.path.abspath("%s.eddy_rotated_bvecs" % self.inputs.out_base)
-        out_movement_rms = os.path.abspath("%s.eddy_movement_rms" % self.inputs.out_base)
-        out_restricted_movement_rms = os.path.abspath(
-            "%s.eddy_restricted_movement_rms" % self.inputs.out_base
-        )
-        out_shell_alignment_parameters = os.path.abspath(
-            "%s.eddy_post_eddy_shell_alignment_parameters" % self.inputs.out_base
-        )
-        shell_PE_translation_parameters = op.abspath(
-            "%s.eddy_post_eddy_shell_PE_translation_parameters" % self.inputs.out_base
-        )
-        out_outlier_report = os.path.abspath("%s.eddy_outlier_report" % self.inputs.out_base)
-        outlier_map = op.abspath("%s.eddy_outlier_map" % self.inputs.out_base)
-        outlier_n_stdev_map = op.abspath("%s.eddy_outlier_n_stdev_map" % self.inputs.out_base)
-        outlier_n_sqr_stdev_map = op.abspath(
-            "%s.eddy_outlier_n_sqr_stdev_map" % self.inputs.out_base
-        )
-
-        if isdefined(self.inputs.cnr_maps) and self.inputs.cnr_maps:
-            out_cnr_maps = os.path.abspath("%s.eddy_cnr_maps.nii.gz" % self.inputs.out_base)
-            if os.path.exists(out_cnr_maps):
-                outputs["out_cnr_maps"] = out_cnr_maps
-        if isdefined(self.inputs.residuals) and self.inputs.residuals:
-            out_residuals = os.path.abspath("%s.eddy_residuals.nii.gz" % self.inputs.out_base)
-            if os.path.exists(out_residuals):
-                outputs["out_residuals"] = out_residuals
-
-        if os.path.exists(out_rotated_bvecs):
-            outputs["out_rotated_bvecs"] = out_rotated_bvecs
-        if os.path.exists(out_movement_rms):
-            outputs["out_movement_rms"] = out_movement_rms
-        if os.path.exists(out_restricted_movement_rms):
-            outputs["out_restricted_movement_rms"] = out_restricted_movement_rms
-        if os.path.exists(out_shell_alignment_parameters):
-            outputs["out_shell_alignment_parameters"] = out_shell_alignment_parameters
-        if os.path.exists(out_outlier_report):
-            outputs["out_outlier_report"] = out_outlier_report
-
-        if op.exists(shell_PE_translation_parameters):
-            outputs["shell_PE_translation_parameters"] = shell_PE_translation_parameters
-        if op.exists(outlier_map):
-            outputs["outlier_map"] = outlier_map
-        if op.exists(outlier_n_stdev_map):
-            outputs["outlier_n_stdev_map"] = outlier_n_stdev_map
-        if op.exists(outlier_n_sqr_stdev_map):
-            outputs["outlier_n_sqr_stdev_map"] = outlier_n_sqr_stdev_map
-
-        return outputs
-
-    def _format_arg(self, name, spec, value):
-        if name == "field":
-            pth, fname, _ = split_filename(value)
-            return spec.argstr % op.join(pth, fname)
-        if name == "json":
-            if isdefined(self.inputs.mporder):
-                return spec.argstr % value
-            return ""
-        return super(ExtendedEddy, self)._format_arg(name, spec, value)
-
-
-class Eddy2SPMMotionInputSpec(BaseInterfaceInputSpec):
-    eddy_motion = File(exists=True)
-
-
-class Eddy2SPMMotionOutputSpec(TraitedSpec):
-    spm_motion_file = File(exists=True)
-
-
-class Eddy2SPMMotion(SimpleInterface):
-    input_spec = Eddy2SPMMotionInputSpec
-    output_spec = Eddy2SPMMotionOutputSpec
-
-    def _run_interface(self, runtime):
-        # Load the eddy motion params file
-        eddy_motion = np.loadtxt(self.inputs.eddy_motion)
-        spm_motion = eddy_motion[:, :6]
-        spm_motion_file = fname_presuffix(
-            self.inputs.eddy_motion, suffix="spm_rp.txt", use_ext=False, newpath=runtime.cwd
-        )
-        np.savetxt(spm_motion_file, spm_motion)
-        self._results["spm_motion_file"] = spm_motion_file
-
-        return runtime
-
-
-def boilerplate_from_eddy_config(eddy_config, fieldmap_type, pepolar_method):
-    """Write boilerplate text based on an eddy config dict."""
-    doing_2stage = "drbuddi" in pepolar_method.lower()
-    ext_eddy = ExtendedEddy(**eddy_config)
-    desc = [
-        "FSL (version %s)'s eddy was used for head motion correction and "
-        "Eddy current correction [@anderssoneddy]." % ext_eddy.version
-    ]
-
-    # Basic eddy setup
-    desc.append(
-        "Eddy was configured with a $q$-space smoothing factor "
-        "of %d," % ext_eddy.inputs.fudge_factor
-    )
-    desc.append("a total of %d iterations," % ext_eddy.inputs.niter)
-    desc.append("and %d voxels used to estimate hyperparameters." % ext_eddy.inputs.nvoxhp)
-
-    # Specify flm/slm model types
-    slm = (
-        "was"
-        if ext_eddy.inputs.slm == "none"
-        else "and a %s second level model were" % ext_eddy.inputs.slm
-    )
-    desc.append(
-        "A %s first level model %s used to characterize Eddy current-"
-        "related spatial distortion." % (ext_eddy.inputs.flm, slm)
-    )
-
-    # fwhm of pre-conditioning filter
-    if isdefined(ext_eddy.inputs.fwhm):
-        desc.append(
-            "A filter with fwhm=%04f was used to pre-condition the "
-            "data before using it to estimate distortions." % ext_eddy.inputs.fwhm
-        )
-
-    # force shelled scheme
-    if isdefined(ext_eddy.inputs.is_shelled) and ext_eddy.inputs.is_shelled:
-        desc.append("$q$-space coordinates were forcefully assigned to shells.")
-
-    if isdefined(ext_eddy.inputs.fep) and ext_eddy.inputs.fep:
-        desc.append("Any empty planes detected in images were filled.")
-
-    # did you sep_offs_mov?
-    if isdefined(ext_eddy.inputs.dont_sep_offs_move) and ext_eddy.inputs.dont_sep_offs_move:
-        desc.append("No attempt was made to separate field offset from subject movement.")
-    else:
-        desc.append("An attempt was made to separate field offset from subject movement.")
-
-    # did you peas?
-    if isdefined(ext_eddy.inputs.dont_peas) and ext_eddy.inputs.dont_peas:
-        desc.append("No alignment of shells was performed post-eddy.")
-    else:
-        desc.append("Shells were aligned post-eddy.")
-
-    # repol settings
-    if isdefined(ext_eddy.inputs.repol) and ext_eddy.inputs.repol:
-        desc.append("Eddy's outlier replacement was run [@eddyrepol].")
-
-    ol_group = {
-        "sw": "slice",
-        "gw": "multi-band group",
-        "both": "both slice and multi-band group",
-        traits.Undefined: "slice",
-    }[ext_eddy.inputs.outlier_type]
-    nvox = ext_eddy.inputs.outlier_nvox if isdefined(ext_eddy.inputs.outlier_nvox) else 250
-    desc.append(
-        "Data were grouped by %s, only including values from "
-        "slices determined to contain at least %d intracerebral "
-        "voxels." % (ol_group, nvox)
-    )
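For illustration only, a hypothetical sketch of rendering a methods paragraph
from an eddy JSON config with ``boilerplate_from_eddy_config`` above (the file
name and keys are made up; keys mirror nipype's ``fsl.Eddy`` input names, and
a working FSL installation is assumed)::

    import json

    with open("eddy_params.json", "r") as f:  # hypothetical config file
        eddy_config = json.load(f)  # e.g. {"repol": True, "mporder": 8}

    text = boilerplate_from_eddy_config(
        eddy_config, fieldmap_type="epi", pepolar_method="TOPUP"
    )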
-    mbf = (
-        ext_eddy.inputs.multiband_factor if isdefined(ext_eddy.inputs.multiband_factor) else 1
-    )
-    mb_off = (
-        ext_eddy.inputs.multiband_offset if isdefined(ext_eddy.inputs.multiband_offset) else 0
-    )
-    if mbf > 1 and "multi-band group" in ol_group:
-        offs_txt = "was"
-        if mb_off != 0:
-            offs_txt = {-1: "bottom", 1: "top"}[mb_off]
-            offs_txt = "and slices removed from the %s of the volume were" % offs_txt
-        desc.append("A multi-band acceleration factor of %d %s assumed." % (mbf, offs_txt))
-
-    # The threshold for outliers
-    std_threshold = (
-        ext_eddy.inputs.outlier_nstd if isdefined(ext_eddy.inputs.outlier_nstd) else 4
-    )
-    ssq = (
-        " sum-of-squares"
-        if isdefined(ext_eddy.inputs.outlier_sqr) and ext_eddy.inputs.outlier_sqr
-        else ""
-    )
-    pos = (
-        " (positively or negatively%s)" % ssq
-        if isdefined(ext_eddy.inputs.outlier_pos) and ext_eddy.inputs.outlier_pos
-        else ""
-    )
-    desc.append(
-        "Groups deviating by more than %d standard deviations%s from the prediction "
-        "had their data replaced with imputed values." % (std_threshold, pos)
-    )
-
-    # slice-to-vol
-    if isdefined(ext_eddy.inputs.mporder) and ext_eddy.inputs.mporder > 0:
-        niter = (
-            ext_eddy.inputs.slice2vol_niter if isdefined(ext_eddy.inputs.slice2vol_niter) else 5
-        )
-        lam = (
-            ext_eddy.inputs.slice2vol_lambda if isdefined(ext_eddy.inputs.slice2vol_lambda) else 1
-        )
-        s2v_interp = (
-            ext_eddy.inputs.slice2vol_interp
-            if isdefined(ext_eddy.inputs.slice2vol_interp)
-            else "trilinear"
-        )
-        desc.append(
-            "Slice-to-volume correction was estimated with "
-            "temporal order %d, %d iterations, %s interpolation "
-            "and lambda=%.3f [@eddys2v]." % (ext_eddy.inputs.mporder, niter, s2v_interp, lam)
-        )
-
-    # distortion correction
-    if "topup" in pepolar_method.lower():
-        desc.append(topup_boilerplate(fieldmap_type, pepolar_method))
-    # DRBUDDI is described in its own workflow
-
-    # move by susceptibility
-    if (
-        isdefined(ext_eddy.inputs.estimate_move_by_susceptibility)
-        and ext_eddy.inputs.estimate_move_by_susceptibility
-    ):
-        mbs_niter = ext_eddy.inputs.mbs_niter if isdefined(ext_eddy.inputs.mbs_niter) else 10
-        mbs_lambda = (
-            ext_eddy.inputs.mbs_lambda if isdefined(ext_eddy.inputs.mbs_lambda) else 10
-        )
-        mbs_ksp = ext_eddy.inputs.mbs_ksp if isdefined(ext_eddy.inputs.mbs_ksp) else 10
-        desc.append(
-            "Dynamic susceptibility distortion correction was "
-            "applied with %d iterations, lambda=%.2f and spline "
-            "knot-spacing of %.2fmm [@eddysus]." % (mbs_niter, mbs_lambda, mbs_ksp)
-        )
-
-    # Format the interpolation
-    lsr_ref = " [@fsllsr]" if ext_eddy.inputs.method == "lsr" else ""
-    if doing_2stage:
-        desc.append(
-            "Interpolation after head motion and initial susceptibility "
-            "distortion correction"
-        )
-    else:
-        desc.append("Final interpolation")
-    desc.append("was performed using the `%s` method%s." % (ext_eddy.inputs.method, lsr_ref))
-    if not doing_2stage:
-        desc.append("\n\n")
-    return " ".join(desc)
-
-
-def topup_boilerplate(fieldmap_type, pepolar_method):
-    """Write boilerplate text based on fieldmaps"""
-    if fieldmap_type not in ("rpe_series", "epi"):
-        return ""
-    desc = []
-    desc.append(
-        "\n\nData were collected with reversed phase-encode blips, resulting "
-        "in pairs of images with distortions going in opposite directions."
-    )
-
-    if "drbuddi" in pepolar_method.lower():
-        desc.append(
-            "Distortion correction was performed in two stages. 
In the first stage, " - "FSL's TOPUP [@topup]" - ) - else: - desc.append("FSL's TOPUP [@topup]") - - desc.append("was used to estimate a susceptibility-induced off-resonance field based on") - if fieldmap_type == "epi": - desc.append("b=0 reference images with reversed " "phase encoding directions.") - else: - desc.append( - "b=0 images extracted from multiple DWI series " - "with reversed phase encoding directions." - ) - desc.append( - "The TOPUP-estimated fieldmap was incorporated into the " - "Eddy current and head motion correction interpolation." - ) - - return " ".join(desc) diff --git a/qsirecon/interfaces/epi_fmap.py b/qsirecon/interfaces/epi_fmap.py deleted file mode 100644 index 2871e550..00000000 --- a/qsirecon/interfaces/epi_fmap.py +++ /dev/null @@ -1,435 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -import json -import os.path as op -from collections import defaultdict -from pathlib import Path - -import nibabel as nb -import numpy as np -import pandas as pd -from nilearn.image import concat_imgs, index_img, load_img -from nipype import logging -from nipype.utils.filemanip import fname_presuffix, split_filename - -from .. import config -from .gradients import concatenate_bvals -from .images import to_lps -from .reports import topup_selection_to_report - -LOGGER = logging.getLogger("nipype.interface") -CRITICAL_KEYS = ["PhaseEncodingDirection", "TotalReadoutTime", "EffectiveEchoSpacing"] - - -def _merge_metadata(metadatas): - # Combine metadata from merged b=0 images - if not metadatas: - return {} - - merged_metadata = metadatas[0] - for next_metadata in metadatas[1:]: - for critical_key in CRITICAL_KEYS: - current_value = merged_metadata.get(critical_key) - next_value = next_metadata.get(critical_key) - if not current_value == next_value: - LOGGER.warning( - "%s inconsistent in fieldmaps: %s, %s", - critical_key, - str(current_value), - str(next_value), - ) - return merged_metadata - - -def read_nifti_sidecar(json_file): - if not json_file.endswith(".json"): - json_file = fname_presuffix(json_file, suffix=".json", use_ext=False) - if not op.exists(json_file): - raise Exception("No corresponding json file found") - - with open(json_file, "r") as f: - metadata = json.load(f) - pe_dir = metadata["PhaseEncodingDirection"] - slice_times = metadata.get("SliceTiming") - trt = metadata.get("TotalReadoutTime") - if trt is None: - pass - return {"PhaseEncodingDirection": pe_dir, "SliceTiming": slice_times, "TotalReadoutTime": trt} - - -acqp_lines = { - "i": "1 0 0 %.6f", - "j": "0 1 0 %.6f", - "k": "0 0 1 %.6f", - "i-": "-1 0 0 %.6f", - "j-": "0 -1 0 %.6f", - "k-": "0 0 -1 %.6f", -} - - -def load_epi_dwi_fieldmaps(fmap_list, b0_threshold): - """Creates a 4D image of b=0s from a list of input images. - - Parameters: - ----------- - - fmap_list: list - List of paths to epi fieldmap images - b0_threshold: int - Maximum b value for an image to be considered a b=0 - - Returns: - -------- - - concatenated_images: spatial image - The b=0 volumes concatenated into a 4D image - b0_indices: list - List of the indices in the concatenated images that contain usable images - original_files: list - List of the original files where each b=0 image came from. 
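    For example, a sketch of the selection rule this function applies when a
    fieldmap NIfTI has a sibling ``.bval`` file (the file name and values are
    made up)::

        import numpy as np

        bvals = np.loadtxt("sub-1_dir-PA_epi.bval")  # e.g. [0, 1000, 0]
        b0_threshold = 100
        keep = np.flatnonzero(bvals < b0_threshold)  # array([0, 2])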
- - """ - # Add in the rpe data, if it exists - b0_indices = [] - original_files = [] - image_series = [] - - for fmap_file in fmap_list: - pth, fname, _ = split_filename(fmap_file) - potential_bval_file = op.join(pth, fname) + ".bval" - starting_index = len(original_files) - fmap_img = load_img(fmap_file) - image_series.append(fmap_img) - num_images = 1 if fmap_img.ndim == 3 else fmap_img.shape[3] - original_files += [fmap_file] * num_images - - # Which images are b=0 images? - if op.exists(potential_bval_file): - # If there is a secret bval file, check that it's allowed - bvals = np.loadtxt(potential_bval_file) - if fmap_img.ndim == 3 and len(bvals) == 1: - _b0_indices = np.arange(num_images) + starting_index - elif fmap_img.ndim == 4 and len(bvals) == fmap_img.shape[3]: - too_large = np.flatnonzero(bvals > b0_threshold) - too_large_values = bvals[too_large] - if too_large.size: - LOGGER.warning( - "Excluding volumes %s from the %s because b=%s is greater than %d", - str(too_large), - fmap_file, - str(too_large_values), - b0_threshold, - ) - _b0_indices = np.flatnonzero(bvals < b0_threshold) + starting_index - else: - raise Exception( - "Secret fieldmap file %s mismatches its image file %s" - % (potential_bval_file, fmap_file) - ) - else: - _b0_indices = np.arange(num_images) + starting_index - b0_indices += _b0_indices.tolist() - - concatenated_images = concat_imgs(image_series, auto_resample=True) - return concatenated_images, b0_indices, original_files - - -def get_distortion_grouping(origin_file_list): - """Discover which distortion groups are present, then assign each volume to a group.""" - unique_files = sorted(set(origin_file_list)) - unique_acqps = [] - line_lookup = {} - for unique_dwi in unique_files: - spec = read_nifti_sidecar(unique_dwi) - spec_line = acqp_lines[spec["PhaseEncodingDirection"]] - acqp_line = spec_line % spec["TotalReadoutTime"] - if acqp_line not in unique_acqps: - unique_acqps.append(acqp_line) - line_lookup[unique_dwi] = unique_acqps.index(acqp_line) + 1 - - group_numbers = [line_lookup[dwi_file] for dwi_file in origin_file_list] - return unique_acqps, group_numbers - - -def eddy_inputs_from_dwi_files(origin_file_list, eddy_prefix): - unique_acqps, group_numbers = get_distortion_grouping(origin_file_list) - - # Create the acqp file - acqp_file = eddy_prefix + "acqp.txt" - with open(acqp_file, "w") as f: - f.write("\n".join(unique_acqps)) - - # Create the index file - index_file = eddy_prefix + "index.txt" - with open(index_file, "w") as f: - f.write(" ".join(map(str, group_numbers))) - - return acqp_file, index_file - - -def get_best_b0_topup_inputs_from( - dwi_file, - bval_file, - b0_threshold, - cwd, - bids_origin_files, - epi_fmaps=None, - max_per_spec=3, - topup_requested=False, - raw_image_sdc=True, -): - """Create a datain spec and a slspec from a concatenated dwi series. - - Create inputs for TOPUP that come from data in ``dwi/`` and epi fieldmaps in ``fmap/``. - The ``nii_file`` input may be the result of concatenating a number of scans with different - distortions present. The original source of each volume in ``nii_file`` is listed in - ``bids_origin_files``. - - The strategy is to select ``max_per_spec`` b=0 images from each distortion group. - Here, distortion group uses the FSL definition of a phase encoding direction and - total readout time, as specified in the datain file used by TOPUP (i.e. "0 -1 0 0.087"). 
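    As a small usage sketch of the grouping helpers above (file names are
    hypothetical and their JSON sidecars are assumed to exist)::

        origin_files = ["sub-1_dir-AP_dwi.nii.gz"] * 2 + ["sub-1_dir-PA_dwi.nii.gz"]
        acqp_file, index_file = eddy_inputs_from_dwi_files(origin_files, "eddy_")
        # eddy_acqp.txt  -> one datain line per distortion group, e.g.
        #                   "0 -1 0 0.087000" and "0 1 0 0.087000"
        # eddy_index.txt -> "1 1 2" (one group number per volume)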
-
-    Parameters
-    ----------
-    dwi_file : str
-        A 4D DWI series
-    bval_file: str
-        path to the bval file corresponding to ``dwi_file``
-    b0_threshold: int
-        maximum b value for an image to be considered a b=0
-    cwd: str
-        working directory where the topup inputs will be written
-    bids_origin_files: list
-        A list with the original bids file of each image in ``dwi_file``. This is
-        necessary because merging may have happened earlier in the pipeline
-    epi_fmaps:
-        A list of images from the fmaps/ directory.
-    max_per_spec: int
-        The maximum number of b=0 images to extract from a PE direction / image set
-    topup_requested: bool
-        Whether TOPUP will actually be run (enables the distortion-group sanity check)
-    raw_image_sdc: bool
-        Extract b=0 images from the original BIDS files rather than ``dwi_file``
-
-    """
-
-    # Start with the DWI file. Determine which images are b=0 and where they came from
-    dwi_b0_df = split_into_b0s_and_origins(
-        b0_threshold,
-        bids_origin_files,
-        dwi_file,
-        cwd,
-        bval_file=bval_file,
-        b0_indices=None,
-        use_original_files=raw_image_sdc,
-    )
-
-    # If there are epi fieldmaps, add them to the table
-    if epi_fmaps:
-        epi_4d, epi_b0_indices, epi_original_files = load_epi_dwi_fieldmaps(
-            epi_fmaps, b0_threshold
-        )
-        epi_b0_df = split_into_b0s_and_origins(
-            b0_threshold,
-            epi_original_files,
-            epi_4d,
-            cwd,
-            bval_file=None,
-            b0_indices=epi_b0_indices,
-        )
-        dwi_b0_df = pd.concat([dwi_b0_df, epi_b0_df], axis=0, ignore_index=True)
-
-    unique_bids_files = dwi_b0_df.bids_origin_file.unique().tolist()
-    spec_lookup = {}
-    slicetime_lookup = {}
-    for unique_bids_file in unique_bids_files:
-        spec = read_nifti_sidecar(unique_bids_file)
-        spec_line = acqp_lines[spec["PhaseEncodingDirection"]]
-        spec_lookup[unique_bids_file] = spec_line % spec["TotalReadoutTime"]
-        slicetime_lookup[unique_bids_file] = spec["SliceTiming"]
-
-    # Group the b=0 images by their spec
-    dwi_b0_df["fsl_spec"] = dwi_b0_df["bids_origin_file"].map(spec_lookup)
-    # Write the datain text file and make sure it's usable if it's needed
-    if len(dwi_b0_df["fsl_spec"].unique()) < 2 and topup_requested:
-        config.loggers.workflow.critical(dwi_b0_df["fsl_spec"])
-        raise Exception(
-            "Unable to run TOPUP: not enough distortion groups. "
-            'Check "IntendedFor" fields or consider using --ignore fieldmaps.'
- ) - spec_groups = dwi_b0_df.groupby("fsl_spec") - max_per_spec = min(max_per_spec, min(spec_groups.apply(len))) - - # Calculate the "quality" of each image: - dwi_b0_df["qc_score"] = spec_groups["nii_3d_files"].transform(calculate_best_b0s) - dwi_b0_df["qc_rank"] = ( - np.nan_to_num(spec_groups["qc_score"].rank(ascending=True), nan=1.0).astype(int) - 1 - ) - - # Select only the top - dwi_b0_df["selected_for_sdc"] = dwi_b0_df["qc_rank"] < max_per_spec - sdc_selections = dwi_b0_df[dwi_b0_df["selected_for_sdc"]].reset_index() - # Make sure the first image in topup imain has the same distortion as the - # first b=0 volume in the eddy inputs - sdc_selections["same_as_first"] = sdc_selections["fsl_spec"] == dwi_b0_df.loc[0, "fsl_spec"] - sdc_selections.sort_values( - by=["same_as_first", "index"], ascending=[False, True], inplace=True - ) - - imain_output = cwd + "/topup_imain.nii.gz" - imain_img = concat_imgs( - [to_lps(img, new_axcodes=("L", "A", "S")) for img in sdc_selections["nii_3d_files"]], - auto_resample=True, - ) - imain_img.to_filename(imain_output) - - datain_file = cwd + "/topup_datain.txt" - with open(datain_file, "w") as f: - f.write("\n".join(sdc_selections["fsl_spec"])) - - b0_csv = cwd + "/b0_selection_info.csv" - dwi_b0_df.drop("nii_3d_files", 1).to_csv(b0_csv, index=False) - - # get out reference images from the topup and eddy data - topup_reg_file = cwd + "/topup_reg_image.nii.gz" - index_img(imain_output, 0).to_filename(topup_reg_file) - - topup_report = topup_selection_to_report( - np.flatnonzero(dwi_b0_df["selected_for_sdc"]), - dwi_b0_df["bids_origin_file"], - spec_lookup, - image_source="data", - ) - return ( - datain_file, - imain_output, - topup_report, - b0_csv, - topup_reg_file, - dwi_b0_df.loc[0, "nii_3d_files"], - ) - - -def relative_b0_index(b0_indices, original_files): - """Find the index of each b=0 image in its original imaging series - - >>> b0_indices = [0, 7, 11, 15, 17, 30, 37, 41, 45] - >>> original_files = ["sub-1_dir-AP_dwi.nii.gz"] * 30 + ["sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> print( - ... relative_b0_index(b0_indices, - ... original_files)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE - [0, 7, 11, 15, 17, 0, 7, 11, 15] - - - Or - - >>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [ - ... 
"sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> print(relative_b0_index(b0_indices, original_files)) - [0, 7, 11, 0, 2, 0, 7, 11, 15] - - """ - image_counts = defaultdict(int) - ordered_files = [] - for original_file in original_files: - if original_file not in image_counts: - ordered_files.append(original_file) - image_counts[original_file] += 1 - offsets = [0] - for original_file in ordered_files: - offsets.append(offsets[-1] + image_counts[original_file]) - image_offsets = dict(zip(ordered_files, offsets)) - - original_indices = [] - for b0_index in b0_indices: - original_file = original_files[b0_index] - original_index = b0_index - image_offsets[original_file] - original_indices.append(original_index) - - return original_indices - - -def calculate_best_b0s(b0_list, radius=4): - import SimpleITK as sitk - - imgs = [sitk.ReadImage(fname, sitk.sitkFloat64) for fname in b0_list] - no_reg = sitk.ImageRegistrationMethod() - no_reg.SetMetricSamplingStrategy(no_reg.NONE) - no_reg.SetMetricAsCorrelation() - pairwise = np.zeros((len(b0_list), len(b0_list)), dtype=np.float64) - for id0, id1 in zip(*np.triu_indices(len(b0_list), 1)): - pairwise[id0, id1] = no_reg.MetricEvaluate(imgs[id0], imgs[id1]) - pairwise = pairwise + pairwise.T - # Don't include self correlation - np.fill_diagonal(pairwise, np.nan) - return np.nanmean(pairwise, axis=0) - - -def _get_bvals(bval_input): - if isinstance(bval_input, list): - return concatenate_bvals(bval_input, None) - return np.loadtxt(bval_input) - - -# In case of a 3d image -def safe_get_3d_image(img_file, b0_index): - if isinstance(img_file, Path) or isinstance(img_file, str): - _img = nb.load(img_file) - else: - _img = img_file - if _img.ndim < 4: - if b0_index > 0: - raise Exception("Impossible b=0 index in a 3d image") - return _img - return index_img(_img, b0_index) - - -def split_into_b0s_and_origins( - b0_threshold, - original_files, - img_file, - cwd, - b0_indices=None, - bval_file=None, - use_original_files=True, -): - """ """ - b0_bids_files = [] - b0_nii_files = [] - full_img = load_img(img_file) - - # If no b=0 indices were provided, get them from the bvals or assume everything - # is a b=0 - if b0_indices is None: - if bval_file is not None: - # Start with the DWI file. 
Determine which images are b=0 - bvals = _get_bvals(bval_file) - b0_indices = np.flatnonzero(bvals < b0_threshold) - if not b0_indices.size: - raise RuntimeError("No b=0 images available.") - else: - # Assume they're all b=0 - b0_indices = ( - np.array([0]) if full_img.ndim < 4 else np.arange(full_img.shape[3], dtype=int) - ) - - relative_indices = relative_b0_index(b0_indices, original_files) - - # find the original files accompanying each b=0 - for b0_index, original_index in zip(b0_indices, relative_indices): - original_file = original_files[b0_index] - b0_bids_files.append(original_file) - new_b0_path = fname_presuffix( - original_file, suffix="_b0-%02d" % original_index, newpath=cwd - ) - image_source = original_file if use_original_files else full_img - source_index = original_index if use_original_files else b0_index - print("image_source", image_source) - print("new_b0_path", new_b0_path) - safe_get_3d_image(image_source, source_index).to_filename(new_b0_path) - b0_nii_files.append(new_b0_path) - - return pd.DataFrame( - { - "nii_3d_files": b0_nii_files, - "bids_origin_file": b0_bids_files, - "original_volume": relative_indices, - } - ) diff --git a/qsirecon/interfaces/fmap.py b/qsirecon/interfaces/fmap.py deleted file mode 100644 index 7607db59..00000000 --- a/qsirecon/interfaces/fmap.py +++ /dev/null @@ -1,1523 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Interfaces to deal with the various types of fieldmap sources - - .. testsetup:: - - >>> tmpdir = getfixture('tmpdir') - >>> tmp = tmpdir.chdir() # changing to a temporary directory - >>> nb.Nifti1Image(np.zeros((90, 90, 60)), None, None).to_filename( - ... 
tmpdir.join('epi.nii.gz').strpath)


"""
-import json
-import os.path as op
-from collections import defaultdict
-
-import nibabel as nb
-import nilearn.image as nim
-import nilearn.plotting as nip
-import numpy as np
-from lxml import etree
-from nilearn.image import concat_imgs, index_img, iter_img, load_img, math_img
-from nipype import logging
-from nipype.interfaces import ants
-from nipype.interfaces.ants.resampling import ApplyTransformsInputSpec
-from nipype.interfaces.base import (
-    BaseInterfaceInputSpec,
-    File,
-    InputMultiObject,
-    OutputMultiObject,
-    SimpleInterface,
-    TraitedSpec,
-    isdefined,
-    traits,
-)
-from nipype.interfaces.fsl.epi import TOPUP, TOPUPInputSpec
-from nipype.interfaces.mixins import reporting
-from nipype.utils.filemanip import fname_presuffix, split_filename
-from niworkflows.viz.utils import (
-    SVGNS,
-    compose_view,
-    cuts_from_bbox,
-    extract_svg,
-    robust_set_limits,
-    uuid4,
-)
-from svgutils.transform import SVGFigure
-
-from .images import to_lps
-from .reports import topup_selection_to_report
-
-LOGGER = logging.getLogger("nipype.interface")
-CRITICAL_KEYS = ["PhaseEncodingDirection", "TotalReadoutTime", "EffectiveEchoSpacing"]
-
-
-class _ParallelTOPUPInputSpec(TOPUPInputSpec):
-    nthreads = traits.Int(argstr="--nthr=%d", nohash=True, mandatory=False)
-
-
-class ParallelTOPUP(TOPUP):
-    input_spec = _ParallelTOPUPInputSpec
-
-
-class B0RPEFieldmapInputSpec(BaseInterfaceInputSpec):
-    b0_file = InputMultiObject(File(exists=True))
-    output_3d_images = traits.Bool(False, usedefault=True)
-    max_num_b0s = traits.Int(3, usedefault=True)
-    orientation = traits.Enum("LPS", "LAS", default="LPS", usedefault=True)
-    b0_threshold = traits.Int(100, usedefault=True)
-
-
-class B0RPEFieldmapOutputSpec(TraitedSpec):
-    fmap_file = OutputMultiObject(File(exists=True))
-    fmap_info = OutputMultiObject(File(exists=True))
-    fmap_report = traits.Str()
-
-
-class B0RPEFieldmap(SimpleInterface):
-    """Prepares b=0 EPI fieldmaps to be used for distortion correction.
-
-    Some Siemens scanners cannot export a b=0 image on its own and instead
-    produce a DWI series (with bvals and bvecs). This interface removes the
-    b>0 volumes and writes the b=0 images in the requested orientation
-    (LAS+ for FSL, or LPS+ for everything else).
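    As a rough illustration of what the orientation handling amounts to (using
    plain nibabel rather than this module's ``to_lps`` helper; the file name is
    hypothetical)::

        import nibabel as nb
        from nibabel.orientations import axcodes2ornt, io_orientation, ornt_transform

        img = nb.load("sub-1_dir-PA_epi.nii.gz")
        target = axcodes2ornt(("L", "P", "S"))  # or ("L", "A", "S") for FSL
        xfm = ornt_transform(io_orientation(img.affine), target)
        reoriented = img.as_reoriented(xfm)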
- - **Inputs** - b0_file: str - List of paths to b=0 epi fieldmaps in fmaps/ or an RPE series in dwi/ - output_3d_images: bool - Write outputs as multiple 3d images - max_num_b0s: int - Include a maximum number of b=0 images in the outputs - orientation: str - Write the outputs in either 'LAS' or 'LPS' orientation - - """ - - input_spec = B0RPEFieldmapInputSpec - output_spec = B0RPEFieldmapOutputSpec - - def _run_interface(self, runtime): - - # Get b=0 images from all the inputs - b0_series, b0_indices, original_files = load_epi_dwi_fieldmaps( - self.inputs.b0_file, self.inputs.b0_threshold - ) - - # Only get the requested number of images - _, fmap_imain, fmap_report, _ = topup_inputs_from_4d_file( - b0_series, - b0_indices, - original_files, - image_source="EPI fieldmap", - max_per_spec=self.inputs.max_num_b0s, - ) - LOGGER.info(fmap_report) - - # Get b=0 images and metadata from all the input images - b0_fieldmap_metadata = [] - for image_path in set(original_files): - pth, fname, _ = split_filename(image_path) - original_json = op.join(pth, fname) + ".json" - b0_fieldmap_metadata.append(original_json) - - # Warn the user if the metadata does not match - merged_metadata = _merge_metadata(b0_fieldmap_metadata) - merged_b0s = to_lps(fmap_imain, tuple(self.inputs.orientation)) - # Output just one 3/4d image and a sidecar - if not self.inputs.output_3d_images: - # Save the conformed fmap - output_fmap = fname_presuffix( - self.inputs.b0_file[0], suffix="conform", newpath=runtime.cwd - ) - output_json = fname_presuffix(output_fmap, use_ext=False, suffix=".json") - fmap_imain.to_filename(output_fmap) - with open(output_json, "w") as sidecar: - json.dump(merged_metadata, sidecar) - self._results["fmap_file"] = output_fmap - self._results["fmap_info"] = output_json - return runtime - - image_list = [] - json_list = [] - for imgnum, img in enumerate(iter_img(merged_b0s)): - - # Save the conformed fmap and metadata - output_fmap = fname_presuffix( - self.inputs.b0_file[0], - suffix="%s_%03d" % (self.inputs.orientation, imgnum), - newpath=runtime.cwd, - ) - output_json = fname_presuffix(output_fmap, use_ext=False, suffix=".json") - with open(output_json, "w") as sidecar: - json.dump(merged_metadata, sidecar) - img.to_filename(output_fmap) - - # Append to lists - image_list.append(output_fmap) - json_list.append(output_json) - - self._results["fmap_file"] = image_list - self._results["fmap_info"] = json_list - return runtime - - -def _merge_metadata(metadatas): - # Combine metadata from merged b=0 images - if not metadatas: - return {} - - merged_metadata = metadatas[0] - for next_metadata in metadatas[1:]: - for critical_key in CRITICAL_KEYS: - current_value = merged_metadata.get(critical_key) - next_value = next_metadata.get(critical_key) - if not current_value == next_value: - LOGGER.warning( - "%s inconsistent in fieldmaps: %s, %s", - critical_key, - str(current_value), - str(next_value), - ) - return merged_metadata - - -class FieldToRadSInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input fieldmap") - fmap_range = traits.Float(desc="range of input field map") - - -class FieldToRadSOutputSpec(TraitedSpec): - out_file = File(desc="the output fieldmap") - fmap_range = traits.Float(desc="range of input field map") - - -class FieldToRadS(SimpleInterface): - """ - The FieldToRadS converts from arbitrary units to rad/s - """ - - input_spec = FieldToRadSInputSpec - output_spec = FieldToRadSOutputSpec - - def _run_interface(self, runtime): - fmap_range = None - 
if isdefined(self.inputs.fmap_range): - fmap_range = self.inputs.fmap_range - self._results["out_file"], self._results["fmap_range"] = _torads( - self.inputs.in_file, fmap_range, newpath=runtime.cwd - ) - return runtime - - -class FieldToHzInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input fieldmap") - range_hz = traits.Float(mandatory=True, desc="range of input field map") - - -class FieldToHzOutputSpec(TraitedSpec): - out_file = File(desc="the output fieldmap") - - -class FieldToHz(SimpleInterface): - """ - The FieldToHz converts from arbitrary units to Hz - """ - - input_spec = FieldToHzInputSpec - output_spec = FieldToHzOutputSpec - - def _run_interface(self, runtime): - self._results["out_file"] = _tohz( - self.inputs.in_file, self.inputs.range_hz, newpath=runtime.cwd - ) - return runtime - - -class Phasediff2FieldmapInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input fieldmap") - metadata = traits.Dict(mandatory=True, desc="BIDS metadata dictionary") - - -class Phasediff2FieldmapOutputSpec(TraitedSpec): - out_file = File(desc="the output fieldmap") - - -class Phasediff2Fieldmap(SimpleInterface): - """ - Convert a phase difference map into a fieldmap in Hz - """ - - input_spec = Phasediff2FieldmapInputSpec - output_spec = Phasediff2FieldmapOutputSpec - - def _run_interface(self, runtime): - self._results["out_file"] = phdiff2fmap( - self.inputs.in_file, _delta_te(self.inputs.metadata), newpath=runtime.cwd - ) - return runtime - - -class Phases2FieldmapInputSpec(BaseInterfaceInputSpec): - phase_files = InputMultiObject( - File(exists=True), mandatory=True, desc="list of phase1, phase2 files" - ) - metadatas = traits.List( - traits.Dict, mandatory=True, desc="list of phase1, phase2 metadata dicts" - ) - - -class Phases2FieldmapOutputSpec(TraitedSpec): - out_file = File(desc="the output fieldmap") - phasediff_metadata = traits.Dict(desc="the phasediff metadata") - - -class Phases2Fieldmap(SimpleInterface): - """ - Convert a phase1, phase2 into a difference map - """ - - input_spec = Phases2FieldmapInputSpec - output_spec = Phases2FieldmapOutputSpec - - def _run_interface(self, runtime): - # Get the echo times - fmap_file, merged_metadata = phases2fmap( - self.inputs.phase_files, self.inputs.metadatas, newpath=runtime.cwd - ) - self._results["phasediff_metadata"] = merged_metadata - self._results["out_file"] = fmap_file - return runtime - - -def phases2fmap(phase_files, metadatas, newpath=None): - """Calculates a phasediff from two phase images. 
Assumes monopolar - readout.""" - from copy import deepcopy - - import nibabel as nb - import numpy as np - from nipype.utils.filemanip import fname_presuffix - - phasediff_file = fname_presuffix(phase_files[0], suffix="_phasediff", newpath=newpath) - echo_times = [meta.get("EchoTime") for meta in metadatas] - if None in echo_times or echo_times[0] == echo_times[1]: - raise RuntimeError() - # Determine the order of subtraction - short_echo_index = echo_times.index(min(echo_times)) - long_echo_index = echo_times.index(max(echo_times)) - - short_phase_image = phase_files[short_echo_index] - long_phase_image = phase_files[long_echo_index] - - image0 = nb.load(short_phase_image) - phase0 = image0.get_fdata() - image1 = nb.load(long_phase_image) - phase1 = image1.get_fdata() - - def rescale_image(img): - if np.any(img < -128): - # This happens sometimes on 7T fieldmaps - LOGGER.info("Found negative values in phase image: rescaling") - imax = img.max() - imin = img.min() - scaled = 2 * ((img - imin) / (imax - imin) - 0.5) - return np.pi * scaled - mask = img > 0 - imax = img.max() - imin = img.min() - max_check = imax - 4096 - if np.abs(max_check) > 10 or np.abs(imin) > 10: - LOGGER.warning("Phase image may be scaled incorrectly: check results") - return mask * (img / 2048 * np.pi - np.pi) - - # Calculate fieldmaps - rad0 = rescale_image(phase0) - rad1 = rescale_image(phase1) - a = np.cos(rad0) - b = np.sin(rad0) - c = np.cos(rad1) - d = np.sin(rad1) - fmap = -np.arctan2(b * c - a * d, a * c + b * d) - - phasediff_nii = nb.Nifti1Image(fmap, image0.affine) - phasediff_nii.set_data_dtype(np.float32) - phasediff_nii.to_filename(phasediff_file) - - merged_metadata = deepcopy(metadatas[0]) - del merged_metadata["EchoTime"] - merged_metadata["EchoTime1"] = float(echo_times[short_echo_index]) - merged_metadata["EchoTime2"] = float(echo_times[long_echo_index]) - - return phasediff_file, merged_metadata - - -def _despike2d(data, thres, neigh=None): - """ - despiking as done in FSL fugue - """ - - if neigh is None: - neigh = [-1, 0, 1] - nslices = data.shape[-1] - - for k in range(nslices): - data2d = data[..., k] - - for i in range(data2d.shape[0]): - for j in range(data2d.shape[1]): - vals = [] - thisval = data2d[i, j] - for ii in neigh: - for jj in neigh: - try: - vals.append(data2d[i + ii, j + jj]) - except IndexError: - pass - vals = np.array(vals) - patch_range = vals.max() - vals.min() - patch_med = np.median(vals) - - if patch_range > 1e-6 and (abs(thisval - patch_med) / patch_range) > thres: - data[i, j, k] = patch_med - return data - - -def _unwrap(fmap_data, mag_file, mask=None): - import os - - fsl_check = os.environ.get("FSL_BUILD") - if fsl_check == "no_fsl": - raise Exception( - """Container in use does not have FSL. 
To use this workflow,
-            please download the qsirecon container with FSL installed."""
-        )
-    from math import pi
-
-    from nipype.interfaces.fsl import PRELUDE
-
-    magnii = nb.load(mag_file)
-
-    if mask is None:
-        mask = np.ones_like(fmap_data, dtype=np.uint8)
-
-    fmapmax = max(abs(fmap_data[mask > 0].min()), fmap_data[mask > 0].max())
-    fmap_data *= pi / fmapmax
-
-    nb.Nifti1Image(fmap_data, magnii.affine).to_filename("fmap_rad.nii.gz")
-    nb.Nifti1Image(mask, magnii.affine).to_filename("fmap_mask.nii.gz")
-    nb.Nifti1Image(magnii.get_fdata(), magnii.affine).to_filename("fmap_mag.nii.gz")
-
-    # Run prelude
-    res = PRELUDE(
-        phase_file="fmap_rad.nii.gz",
-        magnitude_file="fmap_mag.nii.gz",
-        mask_file="fmap_mask.nii.gz",
-    ).run()
-
-    unwrapped = nb.load(res.outputs.unwrapped_phase_file).get_fdata() * (fmapmax / pi)
-    return unwrapped
-
-
-def get_ees(in_meta, in_file=None):
-    """
-    Calculate the *effective echo spacing* :math:`t_\\text{ees}`
-    for an input :abbr:`EPI (echo-planar imaging)` scan.
-
-
-    There are several procedures to calculate the effective
-    echo spacing. The basic one is that an ``EffectiveEchoSpacing``
-    field is set in the JSON sidecar. The following examples
-    use an ``'epi.nii.gz'`` file-stub which has 90 pixels in the
-    j-axis encoding direction.
-
-    >>> meta = {'EffectiveEchoSpacing': 0.00059,
-    ...         'PhaseEncodingDirection': 'j-'}
-    >>> get_ees(meta)
-    0.00059
-
-    If the *total readout time* :math:`T_\\text{ro}` (``TotalReadoutTime``
-    BIDS field) is provided, then the effective echo spacing can be
-    calculated reading the number of voxels :math:`N_\\text{PE}` along the
-    phase-encoding direction and the parallel acceleration
-    factor of the EPI
-
-    .. math ::
-
-        t_\\text{ees} = T_\\text{ro} \\, (N_\\text{PE} / f_\\text{acc} - 1)^{-1}
-
-    where :math:`N_\\text{PE}` is the number of pixels along the phase-encoding
-    direction, and :math:`f_\\text{acc}` is the parallel imaging acceleration factor
-    (:abbr:`GRAPPA (GeneRalized Autocalibrating Partial Parallel Acquisition)`,
-    :abbr:`ARC (Autocalibrating Reconstruction for Cartesian imaging)`, etc.).
-
-    >>> meta = {'TotalReadoutTime': 0.02596,
-    ...         'PhaseEncodingDirection': 'j-',
-    ...         'ParallelReductionFactorInPlane': 2}
-    >>> get_ees(meta, in_file='epi.nii.gz')
-    0.00059
-
-    Some vendors, like Philips, store different parameter names
-    (see http://dbic.dartmouth.edu/pipermail/mrusers/attachments/\\
-20141112/eb1d20e6/attachment.pdf):
-
-    >>> meta = {'WaterFatShift': 8.129,
-    ...         'MagneticFieldStrength': 3,
-    ...         'PhaseEncodingDirection': 'j-',
-    ...         
'ParallelReductionFactorInPlane': 2} - >>> get_ees(meta, in_file='epi.nii.gz') - 0.00041602630141921826 - - """ - - import nibabel as nb - - from qsirecon.interfaces.fmap import _get_pe_index - - # Use case 1: EES is defined - ees = in_meta.get("EffectiveEchoSpacing", None) - if ees is not None: - return ees - - # All other cases require the parallel acc and npe (N vox in PE dir) - acc = float(in_meta.get("ParallelReductionFactorInPlane", 1.0)) - npe = nb.load(in_file).shape[_get_pe_index(in_meta)] - etl = npe // acc - - # Use case 2: TRT is defined - trt = in_meta.get("TotalReadoutTime", None) - if trt is not None: - return trt / (etl - 1) - - # Use case 3 (philips scans) - wfs = in_meta.get("WaterFatShift", None) - if wfs is not None: - fstrength = in_meta["MagneticFieldStrength"] - wfd_ppm = 3.4 # water-fat diff in ppm - g_ratio_mhz_t = 42.57 # gyromagnetic ratio for proton (1H) in MHz/T - wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t - return wfs / (wfs_hz * etl) - - raise ValueError("Unknown effective echo-spacing specification") - - -def get_trt(in_meta, in_file=None): - """ - Calculate the *total readout time* for an input - :abbr:`EPI (echo-planar imaging)` scan. - - - There are several procedures to calculate the total - readout time. The basic one is that a ``TotalReadoutTime`` - field is set in the JSON sidecar. The following examples - use an ``'epi.nii.gz'`` file-stub which has 90 pixels in the - j-axis encoding direction. - - >>> meta = {'TotalReadoutTime': 0.02596} - >>> get_trt(meta) - 0.02596 - - If the *effective echo spacing* :math:`t_\\text{ees}` - (``EffectiveEchoSpacing`` BIDS field) is provided, then the - total readout time can be calculated reading the number - of voxels along the readout direction :math:`T_\\text{ro}` - and the parallel acceleration factor of the EPI :math:`f_\\text{acc}`. - - .. math :: - - T_\\text{ro} = t_\\text{ees} \\, (N_\\text{PE} / f_\\text{acc} - 1) - - >>> meta = {'EffectiveEchoSpacing': 0.00059, - ... 'PhaseEncodingDirection': 'j-', - ... 'ParallelReductionFactorInPlane': 2} - >>> get_trt(meta, in_file='epi.nii.gz') - 0.02596 - - Some vendors, like Philips, store different parameter names: - - >>> meta = {'WaterFatShift': 8.129, - ... 'MagneticFieldStrength': 3, - ... 'PhaseEncodingDirection': 'j-', - ... 
'ParallelReductionFactorInPlane': 2} - >>> get_trt(meta, in_file='epi.nii.gz') - 0.018721183563864822 - - """ - - # Use case 1: TRT is defined - trt = in_meta.get("TotalReadoutTime", None) - if trt is not None: - return trt - - # All other cases require the parallel acc and npe (N vox in PE dir) - acc = float(in_meta.get("ParallelReductionFactorInPlane", 1.0)) - npe = nb.load(in_file).shape[_get_pe_index(in_meta)] - etl = npe // acc - - # Use case 2: TRT is defined - ees = in_meta.get("EffectiveEchoSpacing", None) - if ees is not None: - return ees * (etl - 1) - - # Use case 3 (philips scans) - wfs = in_meta.get("WaterFatShift", None) - if wfs is not None: - fstrength = in_meta["MagneticFieldStrength"] - wfd_ppm = 3.4 # water-fat diff in ppm - g_ratio_mhz_t = 42.57 # gyromagnetic ratio for proton (1H) in MHz/T - wfs_hz = fstrength * wfd_ppm * g_ratio_mhz_t - return wfs / wfs_hz - - raise ValueError("Unknown total-readout time specification") - - -def _get_pe_index(meta): - pe = meta["PhaseEncodingDirection"] - try: - return {"i": 0, "j": 1, "k": 2}[pe[0]] - except KeyError: - raise RuntimeError('"%s" is an invalid PE string' % pe) - - -def _torads(in_file, fmap_range=None, newpath=None): - """ - Convert a field map to rad/s units - - If fmap_range is None, the range of the fieldmap - will be automatically calculated. - - Use fmap_range=0.5 to convert from Hz to rad/s - """ - from math import pi - - import nibabel as nb - from nipype.utils.filemanip import fname_presuffix - - out_file = fname_presuffix(in_file, suffix="_rad", newpath=newpath) - fmapnii = nb.load(in_file) - fmapdata = fmapnii.get_fdata() - - if fmap_range is None: - fmap_range = max(abs(fmapdata.min()), fmapdata.max()) - fmapdata = fmapdata * (pi / fmap_range) - out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header) - out_img.set_data_dtype("float32") - out_img.to_filename(out_file) - return out_file, fmap_range - - -def _tohz(in_file, range_hz, newpath=None): - """Convert a field map to Hz units""" - from math import pi - - import nibabel as nb - from nipype.utils.filemanip import fname_presuffix - - out_file = fname_presuffix(in_file, suffix="_hz", newpath=newpath) - fmapnii = nb.load(in_file) - fmapdata = fmapnii.get_fdata() - fmapdata = fmapdata * (range_hz / pi) - out_img = nb.Nifti1Image(fmapdata, fmapnii.affine, fmapnii.header) - out_img.set_data_dtype("float32") - out_img.to_filename(out_file) - return out_file - - -def phdiff2fmap(in_file, delta_te, newpath=None): - r""" - Converts the input phase-difference map into a fieldmap in Hz, - using the eq. (1) of [Hutton2002]_: - - .. math:: - - \Delta B_0 (\text{T}^{-1}) = \frac{\Delta \Theta}{2\pi\gamma \Delta\text{TE}} - - - In this case, we do not take into account the gyromagnetic ratio of the - proton (:math:`\gamma`), since it will be applied inside TOPUP: - - .. 
math::

        \Delta B_0 (\text{Hz}) = \frac{\Delta \Theta}{2\pi \Delta\text{TE}}

-    """
-    import math
-
-    import nibabel as nb
-    import numpy as np
-    from nipype.utils.filemanip import fname_presuffix
-
-    # GYROMAG_RATIO_H_PROTON_MHZ = 42.576
-
-    out_file = fname_presuffix(in_file, suffix="_fmap", newpath=newpath)
-    image = nb.load(in_file)
-    data = image.get_fdata().astype(np.float32) / (2.0 * math.pi * delta_te)
-    nii = nb.Nifti1Image(data, image.affine, image.header)
-    nii.set_data_dtype(np.float32)
-    nii.to_filename(out_file)
-    return out_file
-
-
-def _delta_te(in_values, te1=None, te2=None):
-    r"""Read :math:`\Delta_\text{TE}` from BIDS metadata dict"""
-    if isinstance(in_values, float):
-        te2 = in_values
-        te1 = 0.0
-
-    if isinstance(in_values, dict):
-        te1 = in_values.get("EchoTime1")
-        te2 = in_values.get("EchoTime2")
-
-        if not all((te1, te2)):
-            te2 = in_values.get("EchoTimeDifference")
-            te1 = 0
-
-    if isinstance(in_values, list):
-        te2, te1 = in_values
-        if isinstance(te1, list):
-            te1 = te1[1]
-        if isinstance(te2, list):
-            te2 = te2[1]
-
-    # For convenience, if both are missing, raise a single error covering both
-    if te1 is None and te2 is None:
-        raise RuntimeError(
-            "EchoTime1 and EchoTime2 metadata fields not found. "
-            "Please consult the BIDS specification."
-        )
-    if te1 is None:
-        raise RuntimeError(
-            "EchoTime1 metadata field not found. Please consult the BIDS specification."
-        )
-    if te2 is None:
-        raise RuntimeError(
-            "EchoTime2 metadata field not found. Please consult the BIDS specification."
-        )
-
-    return abs(float(te2) - float(te1))
-
-
-def read_nifti_sidecar(json_file):
-    if not json_file.endswith(".json"):
-        json_file = fname_presuffix(json_file, suffix=".json", use_ext=False)
-    if not op.exists(json_file):
-        raise Exception("No corresponding json file found")
-
-    with open(json_file, "r") as f:
-        metadata = json.load(f)
-    pe_dir = metadata["PhaseEncodingDirection"]
-    slice_times = metadata.get("SliceTiming")
-    trt = metadata.get("TotalReadoutTime")
-    if trt is None:
-        pass
-    return {"PhaseEncodingDirection": pe_dir, "SliceTiming": slice_times, "TotalReadoutTime": trt}
-
-
-acqp_lines = {
-    "i": "1 0 0 %.6f",
-    "j": "0 1 0 %.6f",
-    "k": "0 0 1 %.6f",
-    "i-": "-1 0 0 %.6f",
-    "j-": "0 -1 0 %.6f",
-    "k-": "0 0 -1 %.6f",
-}
-
-
-def get_topup_inputs_from(
-    dwi_file,
-    bval_file,
-    b0_threshold,
-    topup_prefix,
-    bids_origin_files,
-    epi_fmaps=None,
-    max_per_spec=3,
-    topup_requested=False,
-):
-    """Create a datain spec and a slspec from a concatenated dwi series.
-
-    Create inputs for TOPUP that come from data in ``dwi/`` and epi fieldmaps in ``fmap/``.
-    The ``nii_file`` input may be the result of concatenating a number of scans with different
-    distortions present. The original source of each volume in ``nii_file`` is listed in
-    ``bids_origin_files``.
-
-    The strategy is to select ``max_per_spec`` b=0 images from each distortion group.
-    Here, distortion group uses the FSL definition of a phase encoding direction and
-    total readout time, as specified in the datain file used by TOPUP (i.e. "0 -1 0 0.087").
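    For a quick sense of where these lines come from, the mapping defined in
    ``acqp_lines`` above can be applied to a sidecar's fields directly (a
    minimal sketch; the metadata values are made up)::

        spec = {"PhaseEncodingDirection": "j-", "TotalReadoutTime": 0.087}
        line = acqp_lines[spec["PhaseEncodingDirection"]] % spec["TotalReadoutTime"]
        # line == "0 -1 0 0.087000"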
- - - Case: Two opposing PE direction dwi series - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - For example if the following b=0 images are found at the following indices into - ``dwi_file``: - - ============ ============================= ================== - Image Index BIDS source file for a b=0 Distortion Group - ------------ ----------------------------- ------------------ - 0 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - 15 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - 30 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - 45 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - 60 sub-1_dir-AP_run-2_dwi.nii.gz ``0 -1 0 0.087`` - 75 sub-1_dir-AP_run-2_dwi.nii.gz ``0 -1 0 0.087`` - 90 sub-1_dir-AP_run-2_dwi.nii.gz ``0 -1 0 0.087`` - 105 sub-1_dir-AP_run-2_dwi.nii.gz ``0 -1 0 0.087`` - 120 sub-1_dir-PA_run-1_dwi.nii.gz ``0 1 0 0.087`` - 135 sub-1_dir-PA_run-1_dwi.nii.gz ``0 1 0 0.087`` - 150 sub-1_dir-PA_run-1_dwi.nii.gz ``0 1 0 0.087`` - 165 sub-1_dir-PA_run-1_dwi.nii.gz ``0 1 0 0.087`` - ============ ============================= ================== - - This will select images 0, 45 and 105 to represent the distortion group ``0 -1 0 0.087`` and - images 120, 135 and 165 to represent ``0 1 0 0.087``. The ``--datain`` file would then - contain:: - - 0 -1 0 0.087 - 0 -1 0 0.087 - 0 -1 0 0.087 - 0 1 0 0.087 - 0 1 0 0.087 - 0 1 0 0.087 - - Case: one DWI series and an EPI fieldmap - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - If one or more reverse-phase encoding fieldmap images are passed in through ``epi_fmaps``, - these will undergo the same selection process using ``max_per_spec``. The images will be - added to the *end* of the image series, though, to ensure that the fieldmap correction will - be aligned to the first b=0 image in ``dwi_file``. For example if ``dwi_file`` contains - - ============ ============================= ================== - Image Index BIDS source file for a b=0 Distortion Group - ------------ ----------------------------- ------------------ - 0 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - 15 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - 30 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - 45 sub-1_dir-AP_run-1_dwi.nii.gz ``0 -1 0 0.087`` - ============ ============================= ================== - - and the file from fmaps contains - - ============ ============================= ================== - Image Index BIDS source file for a b=0 Distortion Group - ------------ ----------------------------- ------------------ - 0 sub-1_dir-PA_epi.nii.gz ``0 1 0 0.087`` - 1 sub-1_dir-PA_epi.nii.gz ``0 1 0 0.087`` - ============ ============================= ================== - - images 0, 15 and 45 would be selected to represent ``0 -1 0 0.087`` and images 0 and 1 - would be selected to represent ``0 1 0 0.087``, resulting in a ``--datain`` file that - contains:: - - 0 -1 0 0.087 - 0 -1 0 0.087 - 0 -1 0 0.087 - 0 1 0 0.087 - 0 1 0 0.087 - - - Parameters: - =========== - - dwi_file : str - A 4D DWI series - bval_file : str - The b-values corresponding to the volumes in ``dwi_file`` - b0_threshold : int - Maximum b value for a volume to be considered a b=0 - topup_prefix : str - file prefix for topup inputs - bids_origin_files : list - A list with the original bids file of each image in ``dwi_file``. This is - necessary because merging may have happened earlier in the pipeline - epi_fmaps : list - A list of b=0 images from the fmap/ directory. - max_per_spec : int - The maximum number of b=0 images to extract from a PE direction / image set - topup_requested : bool - If True, raise an error when fewer than two distortion groups are available - - """ - - # Start with the DWI file.
Determine which images are b=0 - bvals = np.loadtxt(bval_file) - b0_indices = np.flatnonzero(bvals < b0_threshold) - if not b0_indices.size: - raise RuntimeError("No b=0 images available for TOPUP from the dwi.") - dwi_nii = load_img(dwi_file) - # Gather images from just the dwi series - dwi_spec_lines, dwi_imain, dwi_report, _ = topup_inputs_from_4d_file( - dwi_nii, - b0_indices, - bids_origin_files, - image_source="combined DWI series", - max_per_spec=max_per_spec, - ) - - # If there are EPI fieldmaps, add them to the END of the topup spec - if epi_fmaps and isdefined(epi_fmaps): - topup_imain, topup_spec_lines, fmap_report = add_epi_fmaps_to_dwi_b0s( - epi_fmaps, b0_threshold, max_per_spec, dwi_spec_lines, dwi_imain - ) - topup_text = dwi_report + fmap_report - else: - topup_imain = dwi_imain - topup_spec_lines = dwi_spec_lines - topup_text = dwi_report - - imain_output = topup_prefix + "imain.nii.gz" - imain_img = to_lps(topup_imain, new_axcodes=("L", "A", "S")) - assert imain_img.shape[3] == len(topup_spec_lines) - imain_img.to_filename(imain_output) - - # Write the datain text file and make sure it's usable if it's needed - if len(set(topup_spec_lines)) < 2 and topup_requested: - print(topup_spec_lines) - raise Exception( - "Unable to run TOPUP: not enough distortion groups. " - 'Check "IntendedFor" fields or consider using --ignore fieldmaps.' - ) - - datain_file = topup_prefix + "datain.txt" - with open(datain_file, "w") as f: - f.write("\n".join(topup_spec_lines)) - - return datain_file, imain_output, topup_text - - -def load_epi_dwi_fieldmaps(fmap_list, b0_threshold): - """Creates a 4D image of b=0s from a list of input images. - - Parameters: - ----------- - - fmap_list: list - List of paths to epi fieldmap images - b0_threshold: int - Maximum b value for an image to be considered a b=0 - - Returns: - -------- - - concatenated_images: spatial image - The b=0 volumes concatenated into a 4D image - b0_indices: list - List of the original indices of the images in ``concatenated_images`` - original_files: list - List of the original files where each b=0 image came from. - - """ - # Add in the rpe data, if it exists - b0_indices = [] - original_files = [] - image_series = [] - - for fmap_file in fmap_list: - pth, fname, _ = split_filename(fmap_file) - potential_bval_file = op.join(pth, fname) + ".bval" - starting_index = len(original_files) - fmap_img = load_img(fmap_file) - image_series.append(fmap_img) - num_images = 1 if fmap_img.ndim == 3 else fmap_img.shape[3] - original_files += [fmap_file] * num_images - - # Which images are b=0 images? - if op.exists(potential_bval_file): - bvals = np.loadtxt(potential_bval_file) - too_large = np.flatnonzero(bvals > b0_threshold) - too_large_values = bvals[too_large] - if too_large.size: - LOGGER.warning( - "Excluding volumes %s from the %s because b=%s is greater than %d", - str(too_large), - fmap_file, - str(too_large_values), - b0_threshold, - ) - _b0_indices = np.flatnonzero(bvals < b0_threshold) + starting_index - else: - _b0_indices = np.arange(num_images) + starting_index - b0_indices += _b0_indices.tolist() - - concatenated_images = concat_imgs(image_series, auto_resample=True) - return concatenated_images, b0_indices, original_files - - -def topup_inputs_from_4d_file( - nii_file, - b0_indices, - bids_origin_files=None, - image_source="combined DWI series", - max_per_spec=3, -): - """Represent distortion groups from a concatenated image and its origins. 
- - Create inputs for TOPUP that come from data in ``dwi/`` and epi fieldmaps in ``fmap/``. - The ``nii_file`` input may be the result of concatenating a number of scans with different - distortions present. The original source of each volume in ``nii_file`` is listed in - ``bids_origin_files``. - - The strategy is to select ``max_per_spec`` b=0 images from each distortion group. - Here, distortion group uses the FSL definition of a phase encoding direction and - total readout time, as specified in the datain file used by TOPUP (i.e. "0 -1 0 0.087"). - - **Parameters** - - nii_file : Nibabel image - A 4D Image - b0_indices: array-like - indices into nii_file that can be used by topup - bids_origin_files: list - A list with the original bids file of each image in ``nii_file``. This is - necessary because merging may have happened earlier in the pipeline - max_per_spec: int - The maximum number of b=0 images to extract from a PE direction / image set - - - """ - - # Start with the DWI file. Determine which images are b=0 - if not len(b0_indices): - raise RuntimeError("No b=0 images available for TOPUP.") - - # find the original files accompanying each b=0 - b0_bids_origins = [bids_origin_files[idx] for idx in b0_indices] - - # Create a lookup-table for each file that was merged into nii_file - # spec_lookup maps original_bids_file -> acqp line - unique_files = list(set(b0_bids_origins)) - spec_lookup = {} - slicetime_lookup = {} - for unique_dwi in unique_files: - spec = read_nifti_sidecar(unique_dwi) - spec_line = acqp_lines[spec["PhaseEncodingDirection"]] - spec_lookup[unique_dwi] = spec_line % spec["TotalReadoutTime"] - slicetime_lookup[unique_dwi] = spec["SliceTiming"] - - # Which spec does each b=0 belong to? - spec_indices = defaultdict(list) - for b0_index, bids_file in zip(b0_indices, b0_bids_origins): - spec_line = spec_lookup[bids_file] - spec_indices[spec_line].append(b0_index) - - # The first image needs to be the first b=0 from the dwi series - first_b0_spec_line = spec_lookup[b0_bids_origins[0]] - first_b0_spec_indices = spec_indices.pop(first_b0_spec_line) - selected_b0_indices = get_evenly_spaced_b0s(first_b0_spec_indices, max_per_spec) - spec_lines = [first_b0_spec_line] * len(selected_b0_indices) - - # Iterate over the remaining unique spec lines - for spec_line in spec_indices: - spec_b0_indices = get_evenly_spaced_b0s(spec_indices[spec_line], max_per_spec) - selected_b0_indices += spec_b0_indices - spec_lines += [spec_line] * len(spec_b0_indices) - - # Load and subset the image - imain_nii = index_img(nii_file, selected_b0_indices) - report = topup_selection_to_report( - selected_b0_indices, bids_origin_files, spec_lookup, image_source=image_source - ) - - return spec_lines, imain_nii, report, spec_lookup - - -def get_evenly_spaced_b0s(b0_indices, max_per_spec): - """Choose up to ``max_per_spec`` b=0 images from a list of b0 indices.""" - if len(b0_indices) <= max_per_spec: - return b0_indices - selected_indices = np.linspace( - 0, len(b0_indices) - 1, num=max_per_spec, endpoint=True, dtype=int - ) - return [b0_indices[idx] for idx in selected_indices] - - -def add_epi_fmaps_to_dwi_b0s(epi_fmaps, b0_threshold, max_per_spec, dwi_spec_lines, dwi_imain): - """Add additional images from EPI fieldmaps for distortion correction. - - In order to fill out the maximum number of images per distortion group, images - from files in the fmap/ directory can be added to those already extracted from the - DWI series. 
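- - For instance, with ``max_per_spec=3``, a distortion group that already contributed - three b=0 images from the DWI series gains no fieldmap images, while a group that - only appears in the fieldmaps contributes up to three b=0 images of its own.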
- - Examples: - --------- - - >>> epi_fmaps = ["/data/sub-1/fmap/sub-1_dir-AP_epi.nii.gz", - ... "/data/sub-1/fmap/sub-1_dir-PA_epi.nii.gz"] - - """ - # Extract b=0 images as if we were only pulling images from epi fmaps. - fmaps_4d, fmap_b0_indices, fmap_original_files = load_epi_dwi_fieldmaps( - epi_fmaps, b0_threshold - ) - fmap_spec_lines, fmap_imain, fmap_report, fmap_spec_map = topup_inputs_from_4d_file( - fmaps_4d, - fmap_b0_indices, - fmap_original_files, - image_source="EPI fieldmap", - max_per_spec=max_per_spec, - ) - - # Check how many are present in each group from just the dwi files - spec_counts = defaultdict(int) - for dwi_spec in dwi_spec_lines: - spec_counts[dwi_spec] += 1 - - # Only add as many as you need to fill out max_per_spec - fmap_indices_to_add = [] - for image_num, epi_spec in enumerate(fmap_spec_lines): - if spec_counts[epi_spec] + 1 > max_per_spec: - continue - fmap_indices_to_add.append(image_num) - spec_counts[epi_spec] += 1 - - # No additional epi fmaps to add - if not fmap_indices_to_add: - return ( - dwi_imain, - dwi_spec_lines, - " No additional images from EPI fieldmaps were added because the maximum " - "number of images per distortion group was reached.", - ) - - # Add the epi b=0's to the dwi b=0's - topup_imain = concat_imgs( - [dwi_imain, index_img(fmap_imain, fmap_indices_to_add)], auto_resample=True - ) - topup_spec_lines = dwi_spec_lines + [fmap_spec_lines[idx] for idx in fmap_indices_to_add] - - new_report = topup_selection_to_report( - fmap_indices_to_add, fmap_original_files, fmap_spec_map, image_source="EPI fieldmap" - ) - - return topup_imain, topup_spec_lines, new_report - - -def get_distortion_grouping(origin_file_list): - """Discover which distortion groups are present, then assign each volume to a group.""" - unique_files = sorted(set(origin_file_list)) - unique_acqps = [] - line_lookup = {} - for unique_dwi in unique_files: - spec = read_nifti_sidecar(unique_dwi) - spec_line = acqp_lines[spec["PhaseEncodingDirection"]] - acqp_line = spec_line % spec["TotalReadoutTime"] - if acqp_line not in unique_acqps: - unique_acqps.append(acqp_line) - line_lookup[unique_dwi] = unique_acqps.index(acqp_line) + 1 - - group_numbers = [line_lookup[dwi_file] for dwi_file in origin_file_list] - return unique_acqps, group_numbers - - -def eddy_inputs_from_dwi_files(origin_file_list, eddy_prefix): - unique_acqps, group_numbers = get_distortion_grouping(origin_file_list) - - # Create the acqp file - acqp_file = eddy_prefix + "acqp.txt" - with open(acqp_file, "w") as f: - f.write("\n".join(unique_acqps)) - - # Create the index file - index_file = eddy_prefix + "index.txt" - with open(index_file, "w") as f: - f.write(" ".join(map(str, group_numbers))) - - return acqp_file, index_file - - -class _ApplyScalingImagesInputSpec(ApplyTransformsInputSpec): - input_image = traits.File(mandatory=False) - scaling_image_files = InputMultiObject( - File(exists=True), - mandatory=False, - desc="list of sdc scaling image files in undistorted b0ref space", - ) - dwi_files = InputMultiObject( - File(exists=True), - mandatory=True, - desc="list of dwi files, already resampled into their output space", - ) - reference_image = File(exists=True, mandatory=True, desc="output grid") - - # Transforms to apply - b0_to_intramodal_template_transforms = InputMultiObject( - File(exists=True), - mandatory=False, - desc="list of transforms to register the b=0 to the intramodal template.", - ) - intramodal_template_to_t1_affine = File( - exists=True, mandatory=False, desc="affine from the
intramodal template to t1" - ) - intramodal_template_to_t1_warp = File( - exists=True, desc="warp from the intramodal template to t1" - ) - hmcsdc_dwi_ref_to_t1w_affine = File(exists=True, desc="affine from dwi ref to t1w") - - save_cmd = traits.Bool( - True, usedefault=True, desc="write a log of command lines that were applied" - ) - copy_dtype = traits.Bool(False, usedefault=True, desc="copy dtype from inputs to outputs") - num_threads = traits.Int(1, usedefault=True, nohash=True, desc="number of parallel processes") - transforms = File(mandatory=False) - - -class _ApplyScalingImagesOutputSpec(TraitedSpec): - scaled_images = OutputMultiObject(File(exists=True), desc="Scaled dwi files") - - -class ApplyScalingImages(SimpleInterface): - input_spec = _ApplyScalingImagesInputSpec - output_spec = _ApplyScalingImagesOutputSpec - - def _run_interface(self, runtime): - - if not isdefined(self.inputs.scaling_image_files): - LOGGER.info("Not applying scaling to resampled DWIs") - self._results["scaled_images"] = self.inputs.dwi_files - return runtime - LOGGER.info("Applying scaling to resampled dwis") - - if not len(self.inputs.scaling_image_files) == len(self.inputs.dwi_files): - raise Exception("Mismatch between scaling images and dwis") - - # The affine transform to the t1 can come from hmcsdc or the intramodal template - coreg_to_t1 = traits.Undefined - if isdefined(self.inputs.intramodal_template_to_t1_affine): - if isdefined(self.inputs.hmcsdc_dwi_ref_to_t1w_affine): - LOGGER.warning("Two b0 to t1 transforms are provided: using intramodal") - coreg_to_t1 = self.inputs.intramodal_template_to_t1_affine - else: - coreg_to_t1 = self.inputs.hmcsdc_dwi_ref_to_t1w_affine - - # Handle transforms to intramodal transforms - intramodal_transforms = self.inputs.b0_to_intramodal_template_transforms - intramodal_affine = traits.Undefined - intramodal_warp = traits.Undefined - if isdefined(intramodal_transforms): - intramodal_affine = intramodal_transforms[0] - if len(intramodal_transforms) == 2: - intramodal_warp = intramodal_transforms[1] - elif len(intramodal_transforms) > 2: - raise Exception("Unsupported intramodal template transform") - - # Find the chain of transforms from undistorted b=0 reference to the output space - transform_stack = [ - transform - for transform in [intramodal_affine, intramodal_warp, coreg_to_t1] - if isdefined(transform) - ][::-1] - - # There are a few unique scaling images. 
Find them - scaling_images_to_dwis = defaultdict(list) - for dwi_image, scaling_image in zip( - self.inputs.dwi_files, self.inputs.scaling_image_files - ): - scaling_images_to_dwis[scaling_image].append(dwi_image) - - # Apply the transform, link the resampled scaling image to resampled dwis - dwi_files_to_scalings = {} - for scaling_image in scaling_images_to_dwis: - resampled_scaling_image = fname_presuffix( - scaling_image, suffix="_resampled", newpath=runtime.cwd - ) - xfm = ants.ApplyTransforms( - input_image=scaling_image, - transforms=transform_stack, - reference_image=self.inputs.reference_image, - output_image=resampled_scaling_image, - interpolation="LanczosWindowedSinc", - dimension=3, - ) - xfm.terminal_output = "allatonce" - xfm.resource_monitor = False - runtime = xfm.run().runtime - LOGGER.info(runtime.cmdline) - for dwi_file in scaling_images_to_dwis[scaling_image]: - dwi_files_to_scalings[dwi_file] = resampled_scaling_image - - # Do the math - scaled_dwi_images = [] - for dwi_file in self.inputs.dwi_files: - scaled_dwi_file = fname_presuffix(dwi_file, newpath=runtime.cwd, suffix="_scaled") - math_img("a*b", a=dwi_file, b=dwi_files_to_scalings[dwi_file]).to_filename( - scaled_dwi_file - ) - - scaled_dwi_images.append(scaled_dwi_file) - self._results["scaled_images"] = scaled_dwi_images - - return runtime - - -class _PEPOLARReportInputSpec(BaseInterfaceInputSpec): - fieldmap_type = traits.Enum("rpe_series", "epi") - b0_up_image = File(exists=True, mandatory=True) - b0_up_corrected_image = File(exists=True, mandatory=True) - b0_down_image = File(exists=True, mandatory=True) - b0_down_corrected_image = File(exists=True, mandatory=True) - up_fa_image = File(exists=True) - up_fa_corrected_image = File(exists=True) - down_fa_image = File(exists=True) - down_fa_corrected_image = File(exists=True) - t1w_seg = File(exists=True) - t2w_seg = File(exists=True) - - -class _PEPOLARReportOutputSpec(reporting.ReportCapableOutputSpec): - fa_sdc_report = File(exists=True) - b0_sdc_report = File(exists=True) - - -class PEPOLARReport(SimpleInterface): - input_spec = _PEPOLARReportInputSpec - output_spec = _PEPOLARReportOutputSpec - _n_cuts = 5 - - def _run_interface(self, runtime): - """Generate a reportlet.""" - LOGGER.info("Generating a PEPOLAR visual report") - - ref_segmentation = ( - self.inputs.t1w_seg if not isdefined(self.inputs.t2w_seg) else self.inputs.t2w_seg - ) - # Get a segmentation from an undistorted image as a reference - seg_img = nb.load(ref_segmentation) - b0_up_img = nb.load(self.inputs.b0_up_image) - b0_down_img = nb.load(self.inputs.b0_down_image) - b0_up_corrected_img = nb.load(self.inputs.b0_up_corrected_image) - b0_down_corrected_img = nb.load(self.inputs.b0_down_corrected_image) - cuts = cuts_from_bbox(seg_img, self._n_cuts) - b0_sdc_svg = op.join(runtime.cwd, "b0_blipupdown_sdc.svg") - compose_view( - plot_pepolar( - b0_up_img, - b0_down_img, - seg_img, - "moving-image", - estimate_brightness=True, - cuts=cuts, - label="Original", - upper_label_suffix=": Blip Up", - lower_label_suffix=": Blip Down", - compress=False, - ), - plot_pepolar( - b0_up_corrected_img, - b0_down_corrected_img, - seg_img, - "fixed-image", - estimate_brightness=True, - cuts=cuts, - label="Corrected", - upper_label_suffix=": Blip Up", - lower_label_suffix=": Blip Down", - compress=False, - ), - out_file=b0_sdc_svg, - ) - self._results["b0_sdc_report"] = b0_sdc_svg - - if False in map( - isdefined, - ( - self.inputs.up_fa_image, - self.inputs.up_fa_corrected_image, - self.inputs.down_fa_image, - 
self.inputs.down_fa_corrected_image, - ), - ): - LOGGER.info("No FA images available for SDC report") - return runtime - - # Prepare the FA images for plotting - fa_sdc_svg = op.join(runtime.cwd, "FA_reg.svg") - fa_up_img = nb.load(self.inputs.up_fa_image) - fa_up_corrected_img = nb.load(self.inputs.up_fa_corrected_image) - fa_down_img = nb.load(self.inputs.down_fa_image) - fa_down_corrected_img = nb.load(self.inputs.down_fa_corrected_image) - uncorrected_fa = nim.math_img("(a+b)/2", a=fa_up_img, b=fa_down_img) - corrected_fa = nim.math_img("(a+b)/2", a=fa_up_corrected_img, b=fa_down_corrected_img) - compose_view( - plot_fa_reg( - corrected_fa, - seg_img, - "moving-image", - estimate_brightness=False, - label="FA: After", - cuts=cuts, - ), - plot_fa_reg( - uncorrected_fa, - seg_img, - "fixed-image", - estimate_brightness=False, - label="FA: Before", - cuts=cuts, - ), - out_file=fa_sdc_svg, - ) - self._results["fa_sdc_report"] = fa_sdc_svg - return runtime - - -def plot_pepolar( - blip_up_img, - blip_down_img, - seg_contour_img, - div_id, - plot_params=None, - blip_down_plot_params=None, - order=("z", "x", "y"), - cuts=None, - estimate_brightness=False, - label=None, - blip_down_contour=None, - upper_label_suffix=": low-b", - lower_label_suffix=": high-b", - compress="auto", - overlay=None, - overlay_params=None, -): - """ - Plot the foreground and background views. - Default order is: axial, coronal, sagittal - - Updated version from sdcflows and different from in niworkflows.viz.utils - so that the contour lines never move. This is accomplished by making an empty - image in the grid of the segmentation image and using this as the background. - - """ - plot_params = plot_params or {} - blip_down_plot_params = blip_down_plot_params or {} - - if cuts is None: - raise NotImplementedError - - out_files = [] - if estimate_brightness: - plot_params = robust_set_limits( - blip_up_img.get_fdata(dtype="float32").reshape(-1), plot_params - ) - - zeros_bg_img = nim.new_img_like( - seg_contour_img, np.zeros(seg_contour_img.shape), copy_header=True - ) - - # Plot each cut axis for low-b - image_plot_params = plot_params.copy() - for i, mode in enumerate(list(order)): - plot_params["display_mode"] = mode - plot_params["cut_coords"] = cuts[mode] - if i == 0: - plot_params["title"] = label + upper_label_suffix - else: - plot_params["title"] = None - - # Generate nilearn figure - display = nip.plot_anat(zeros_bg_img, **plot_params) - display.add_overlay(blip_up_img, cmap="gray", **image_plot_params) - display.add_contours(seg_contour_img, colors="b", linewidths=0.5) - - svg = extract_svg(display, compress=compress) - display.close() - - # Find and replace the figure_1 id. 
- xml_data = etree.fromstring(svg) - find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) - find_text(xml_data)[0].set("id", "%s-%s-%s" % (div_id, mode, uuid4())) - - svg_fig = SVGFigure() - svg_fig.root = xml_data - out_files.append(svg_fig) - - # Plot each cut axis for high-b - if estimate_brightness: - blip_down_plot_params = robust_set_limits( - blip_down_img.get_fdata(dtype="float32").reshape(-1), blip_down_plot_params - ) - image_blip_down_plot_params = blip_down_plot_params.copy() - for i, mode in enumerate(list(order)): - blip_down_plot_params["display_mode"] = mode - blip_down_plot_params["cut_coords"] = cuts[mode] - if i == 0: - blip_down_plot_params["title"] = label + lower_label_suffix - else: - blip_down_plot_params["title"] = None - - # Generate nilearn figure - display = nip.plot_anat(zeros_bg_img, **blip_down_plot_params) - display.add_overlay(blip_down_img, cmap="gray", **image_blip_down_plot_params) - display.add_contours(seg_contour_img, colors="b", linewidths=0.5) - svg = extract_svg(display, compress=compress) - display.close() - - # Find and replace the figure_1 id. - xml_data = etree.fromstring(svg) - find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) - find_text(xml_data)[0].set("id", "%s-%s-%s" % (div_id, mode, uuid4())) - - svg_fig = SVGFigure() - svg_fig.root = xml_data - out_files.append(svg_fig) - - return out_files - - -def plot_fa_reg( - fa_img, - seg_contour_img, - div_id, - plot_params=None, - blip_down_plot_params=None, - order=("z", "x", "y"), - cuts=None, - estimate_brightness=False, - label=None, - compress="auto", -): - """ - Plot the foreground and background views. - Default order is: axial, coronal, sagittal - - Updated version from sdcflows and different from in niworkflows.viz.utils - so that the contour lines never move. This is accomplished by making an empty - image in the grid of the segmentation image and using this as the background. - - """ - plot_params = {"vmin": 0.01, "vmax": 0.85, "cmap": "gray"} - if cuts is None: - raise NotImplementedError - - out_files = [] - zeros_bg_img = nim.new_img_like( - seg_contour_img, np.zeros(seg_contour_img.shape), copy_header=True - ) - - # Plot each cut axis for low-b - image_plot_params = plot_params.copy() - for i, mode in enumerate(list(order)): - plot_params["display_mode"] = mode - plot_params["cut_coords"] = cuts[mode] - if i == 0: - plot_params["title"] = label - else: - plot_params["title"] = None - - # Generate nilearn figure - display = nip.plot_anat(zeros_bg_img, **plot_params) - display.add_overlay(fa_img, **image_plot_params) - # display.add_contours(seg_contour_img, colors='b', linewidths=0.5) - - svg = extract_svg(display, compress=compress) - display.close() - - # Find and replace the figure_1 id. 
- xml_data = etree.fromstring(svg) - find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) - find_text(xml_data)[0].set("id", "%s-%s-%s" % (div_id, mode, uuid4())) - - svg_fig = SVGFigure() - svg_fig.root = xml_data - out_files.append(svg_fig) - - return out_files diff --git a/qsirecon/interfaces/freesurfer.py b/qsirecon/interfaces/freesurfer.py index 0ea7e0ed..6f9a28bd 100644 --- a/qsirecon/interfaces/freesurfer.py +++ b/qsirecon/interfaces/freesurfer.py @@ -22,433 +22,6 @@ import os.path as op from pathlib import Path -import nibabel as nb -import numpy as np -from nilearn.image import new_img_like, resample_to_img -from nipype.interfaces import freesurfer as fs -from nipype.interfaces.afni import Zeropad -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - Directory, - File, - InputMultiPath, - SimpleInterface, - TraitedSpec, - isdefined, - traits, -) -from nipype.interfaces.freesurfer.base import FSCommandOpenMP, FSTraitedSpec -from nipype.interfaces.freesurfer.utils import LTAConvert -from nipype.utils.filemanip import copyfile, filename_to_list, fname_presuffix -from niworkflows.utils.images import _copyxform -from scipy.ndimage.morphology import binary_fill_holes -from skimage import morphology as sim - - -class FSTraitedSpecOpenMP(FSTraitedSpec): - num_threads = traits.Int(desc="allows for specifying more threads", nohash=True) - - -class StructuralReference(fs.RobustTemplate): - """Variation on RobustTemplate that simply copies the source if a single - volume is provided. - - >>> from qsirecon.utils.bids import collect_data - >>> t1w = collect_data('ds114', '01')[0]['t1w'] - >>> template = StructuralReference() - >>> template.inputs.in_files = t1w - >>> template.inputs.auto_detect_sensitivity = True - >>> template.cmdline # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE - 'mri_robust_template --satit --mov .../sub-01_ses-retest_T1w.nii.gz - .../sub-01_ses-test_T1w.nii.gz --template mri_robust_template_out.mgz' - - """ - - def _num_vols(self): - n_files = len(self.inputs.in_files) - if n_files != 1: - return n_files - - img = nb.load(self.inputs.in_files[0]) - if len(img.shape) == 3: - return 1 - - return img.shape[3] - - @property - def cmdline(self): - if self._num_vols() == 1: - return "echo Only one time point!" - return super(StructuralReference, self).cmdline - - def _list_outputs(self): - outputs = super(StructuralReference, self)._list_outputs() - if self._num_vols() == 1: - in_file = self.inputs.in_files[0] - outputs["out_file"] = in_file - if isdefined(outputs["transform_outputs"]): - transform_file = outputs["transform_outputs"][0] - fs.utils.LTAConvert( - in_lta="identity.nofile", - source_file=in_file, - target_file=in_file, - out_lta=transform_file, - ).run() - return outputs - - -class MakeMidthicknessInputSpec(fs.utils.MRIsExpandInputSpec): - graymid = InputMultiPath(desc="Existing graymid/midthickness file") - - -class MakeMidthickness(fs.MRIsExpand): - """Variation on MRIsExpand that checks for an existing midthickness/graymid - surface, and copies if available. - - mris_expand is an expensive operation, so this avoids re-running it when the - working directory is lost. - If users provide their own midthickness/graymid file, we assume they have - created it correctly. 
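- - For example (hypothetical paths), if an existing ``lh.midthickness`` surface is - supplied through ``graymid`` for an ``lh`` input surface, the expensive - ``mris_expand`` invocation collapses to a copy:: - - cp .../surf/lh.midthickness <out_file>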
- """ - - input_spec = MakeMidthicknessInputSpec - - @property - def cmdline(self): - cmd = super(MakeMidthickness, self).cmdline - if not isdefined(self.inputs.graymid) or len(self.inputs.graymid) < 1: - return cmd - - # Possible graymid values inclue {l,r}h.{graymid,midthickness} - # Prefer midthickness to graymid, require to be of the same hemisphere - # as input - source = None - in_base = op.basename(self.inputs.in_file) - mt = self._associated_file(in_base, "midthickness") - gm = self._associated_file(in_base, "graymid") - - for surf in self.inputs.graymid: - if op.basename(surf) == mt: - source = surf - break - if op.basename(surf) == gm: - source = surf - - if source is None: - return cmd - - return "cp {} {}".format(source, self._list_outputs()["out_file"]) - - -class FSInjectBrainExtractedInputSpec(BaseInterfaceInputSpec): - subjects_dir = Directory(mandatory=True, desc="FreeSurfer SUBJECTS_DIR") - subject_id = traits.Str(mandatory=True, desc="Subject ID") - in_brain = File(mandatory=True, exists=True, desc="input file, part of a BIDS tree") - - -class FSInjectBrainExtractedOutputSpec(TraitedSpec): - subjects_dir = Directory(desc="FreeSurfer SUBJECTS_DIR") - subject_id = traits.Str(desc="Subject ID") - - -class FSInjectBrainExtracted(SimpleInterface): - input_spec = FSInjectBrainExtractedInputSpec - output_spec = FSInjectBrainExtractedOutputSpec - _always_run = True - - def _run_interface(self, runtime): - subjects_dir, subject_id = inject_skullstripped( - self.inputs.subjects_dir, self.inputs.subject_id, self.inputs.in_brain - ) - self._results["subjects_dir"] = subjects_dir - self._results["subject_id"] = subject_id - return runtime - - -class FSDetectInputsInputSpec(BaseInterfaceInputSpec): - t1w_list = InputMultiPath( - File(exists=True), mandatory=True, desc="input file, part of a BIDS tree" - ) - t2w_list = InputMultiPath(File(exists=True), desc="input file, part of a BIDS tree") - flair_list = InputMultiPath(File(exists=True), desc="input file, part of a BIDS tree") - hires_enabled = traits.Bool(True, usedefault=True, desc="enable hi-resolution processing") - - -class FSDetectInputsOutputSpec(TraitedSpec): - t2w = File(desc="reference T2w image") - use_t2w = traits.Bool(desc="enable use of T2w downstream computation") - flair = File(desc="reference FLAIR image") - use_flair = traits.Bool(desc="enable use of FLAIR downstream computation") - hires = traits.Bool(desc="enable hi-res processing") - mris_inflate = traits.Str(desc="mris_inflate argument") - - -class FSDetectInputs(SimpleInterface): - input_spec = FSDetectInputsInputSpec - output_spec = FSDetectInputsOutputSpec - - def _run_interface(self, runtime): - t2w, flair, self._results["hires"], mris_inflate = detect_inputs( - self.inputs.t1w_list, - t2w_list=self.inputs.t2w_list if isdefined(self.inputs.t2w_list) else None, - flair_list=self.inputs.flair_list if isdefined(self.inputs.flair_list) else None, - hires_enabled=self.inputs.hires_enabled, - ) - - self._results["use_t2w"] = t2w is not None - if self._results["use_t2w"]: - self._results["t2w"] = t2w - - self._results["use_flair"] = flair is not None - if self._results["use_flair"]: - self._results["flair"] = flair - - if self._results["hires"]: - self._results["mris_inflate"] = mris_inflate - - return runtime - - -class TruncateLTA(object): - """Mixin to ensure that LTA files do not store overly long paths, - which lead to segmentation faults when read by FreeSurfer tools. 
- - See the following issues for discussion: - - * https://github.com/freesurfer/freesurfer/pull/180 - * https://github.com/poldracklab/fmriprep/issues/768 - * https://github.com/poldracklab/fmriprep/pull/778 - * https://github.com/poldracklab/fmriprep/issues/1268 - * https://github.com/poldracklab/fmriprep/pull/1274 - """ - - # Use a tuple in case some object produces multiple transforms - lta_outputs = ("out_lta_file",) - - def _post_run_hook(self, runtime): - - outputs = self._list_outputs() - - for lta_name in self.lta_outputs: - lta_file = outputs[lta_name] - if not isdefined(lta_file): - continue - - with open(lta_file, "r") as f: - lines = f.readlines() - - fixed = False - newfile = [] - - for line in lines: - if line.startswith("filename = ") and len(line.strip("\n")) >= 255: - fixed = True - newfile.append("filename = path_too_long\n") - else: - newfile.append(line) - - if fixed: - with open(lta_file, "w") as f: - f.write("".join(newfile)) - - runtime = super(TruncateLTA, self)._post_run_hook(runtime) - - return runtime - - -class PatchedLTAConvert(TruncateLTA, LTAConvert): - """ - LTAConvert produces an LTA file referred to as ``out_lta``; - truncate its filename entries through the TruncateLTA mixin. - """ - - lta_outputs = ("out_lta",) - - -class RefineBrainMaskInputSpec(BaseInterfaceInputSpec): - in_anat = File(exists=True, mandatory=True, desc="input anatomical reference (INU corrected)") - in_aseg = File(exists=True, mandatory=True, desc="input ``aseg`` file, in NifTi format.") - in_ants = File( - exists=True, - mandatory=True, - desc="brain tissue segmentation generated with antsBrainExtraction.sh", - ) - - -class RefineBrainMaskOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="new mask") - - -class RefineBrainMask(SimpleInterface): - """ - Refine the brain mask implicit in the ``aseg.mgz`` - file to include possibly missing gray-matter voxels - and deep, wide sulci.
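- - A hypothetical usage sketch (file names are placeholders):: - - refine = RefineBrainMask( - in_anat="sub-01_desc-preproc_T1w.nii.gz", - in_aseg="sub-01_aseg.nii.gz", - in_ants="sub-01_antsBrainExtraction_seg.nii.gz", - ) - result = refine.run()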
- """ - - input_spec = RefineBrainMaskInputSpec - output_spec = RefineBrainMaskOutputSpec - - def _run_interface(self, runtime): - - self._results["out_file"] = fname_presuffix( - self.inputs.in_anat, suffix="_rbrainmask", newpath=runtime.cwd - ) - - anatnii = nb.load(self.inputs.in_anat) - msknii = nb.Nifti1Image( - grow_mask( - anatnii.get_fdata(), - nb.load(self.inputs.in_aseg).get_fdata(), - nb.load(self.inputs.in_ants).get_fdata(), - ), - anatnii.affine, - anatnii.header, - ) - msknii.set_data_dtype(np.uint8) - msknii.to_filename(self._results["out_file"]) - - return runtime - - -def inject_skullstripped(subjects_dir, subject_id, skullstripped): - mridir = op.join(subjects_dir, subject_id, "mri") - t1 = op.join(mridir, "T1.mgz") - bm_auto = op.join(mridir, "brainmask.auto.mgz") - bm = op.join(mridir, "brainmask.mgz") - - if not op.exists(bm_auto): - img = nb.load(t1) - mask = nb.load(skullstripped) - bmask = new_img_like(mask, mask.get_fdata() > 0) - resampled_mask = resample_to_img(bmask, img, "nearest") - masked_image = new_img_like(img, img.get_fdata() * resampled_mask.get_fdata()) - masked_image.to_filename(bm_auto) - - if not op.exists(bm): - copyfile(bm_auto, bm, copy=True, use_hardlink=True) - - return subjects_dir, subject_id - - -def detect_inputs(t1w_list, t2w_list=None, flair_list=None, hires_enabled=True): - t1w_list = filename_to_list(t1w_list) - t2w_list = filename_to_list(t2w_list) if t2w_list is not None else [] - flair_list = filename_to_list(flair_list) if flair_list is not None else [] - t1w_ref = nb.load(t1w_list[0]) - # Use high resolution preprocessing if voxel size < 1.0mm - # Tolerance of 0.05mm requires that rounds down to 0.9mm or lower - hires = hires_enabled and max(t1w_ref.header.get_zooms()) < 1 - 0.05 - - t2w = None - if t2w_list and max(nb.load(t2w_list[0]).header.get_zooms()) < 1.2: - t2w = t2w_list[0] - - # Prefer T2w to FLAIR if both present and T2w satisfies - flair = None - if flair_list and not t2w and max(nb.load(flair_list[0]).header.get_zooms()) < 1.2: - flair = flair_list[0] - - # https://surfer.nmr.mgh.harvard.edu/fswiki/SubmillimeterRecon - mris_inflate = "-n 50" if hires else None - return (t2w, flair, hires, mris_inflate) - - -def refine_aseg(aseg, ball_size=4): - """ - First step to reconcile ANTs' and FreeSurfer's brain masks. - - Here, the ``aseg.mgz`` mask from FreeSurfer is refined in two - steps, using binary morphological operations: - - 1. With a binary closing operation the sulci are included - into the mask. This results in a smoother brain mask - that does not exclude deep, wide sulci. - - 2. Fill any holes (typically, there could be a hole next to - the pineal gland and the corpora quadrigemina if the great - cerebral brain is segmented out). - - - """ - # Read aseg data - bmask = aseg.copy() - bmask[bmask > 0] = 1 - bmask = bmask.astype(np.uint8) - - # Morphological operations - selem = sim.ball(ball_size) - newmask = sim.binary_closing(bmask, selem) - newmask = binary_fill_holes(newmask.astype(np.uint8), selem).astype(np.uint8) - - return newmask.astype(np.uint8) - - -def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4): - """ - Grow mask including pixels that have a high likelihood. - GM tissue parameters are sampled in image patches of ``ww`` size. 
- - This is inspired by mindboggle's solution to the problem: - https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660 - - """ - selem = sim.ball(bw) - - if ants_segs is None: - ants_segs = np.zeros_like(aseg, dtype=np.uint8) - - aseg[aseg == 42] = 3 # Collapse both hemispheres - gm = anat.copy() - gm[aseg != 3] = 0 - - refined = refine_aseg(aseg) - newrefmask = sim.binary_dilation(refined, selem) - refined - indices = np.argwhere(newrefmask > 0) - for pixel in indices: - # When ATROPOS identified the pixel as GM, set and carry on - if ants_segs[tuple(pixel)] == 2: - refined[tuple(pixel)] = 1 - continue - - window = gm[ - pixel[0] - ww : pixel[0] + ww, - pixel[1] - ww : pixel[1] + ww, - pixel[2] - ww : pixel[2] + ww, - ] - if np.any(window > 0): - mu = window[window > 0].mean() - sigma = max(window[window > 0].std(), 1.0e-5) - zstat = abs(anat[tuple(pixel)] - mu) / sigma - refined[tuple(pixel)] = int(zstat < zval) - - refined = sim.binary_opening(refined, selem) - return refined - - -def medial_wall_to_nan(in_file, subjects_dir, target_subject, newpath=None): - """Convert values on medial wall to NaNs""" - import os - - import nibabel as nb - import numpy as np - - fn = os.path.basename(in_file) - if not target_subject.startswith("fs"): - return in_file - - cortex = nb.freesurfer.read_label( - os.path.join(subjects_dir, target_subject, "label", "{}.cortex.label".format(fn[:2])) - ) - func = nb.load(in_file) - medial = np.delete(np.arange(len(func.darrays[0].data)), cortex) - for darray in func.darrays: - darray.data[medial] = np.nan - - out_file = os.path.join(newpath or os.getcwd(), fn) - func.to_filename(out_file) - return out_file - - def find_fs_path(freesurfer_dir, subject_id): - if freesurfer_dir is None: @@ -460,169 +33,3 @@ def find_fs_path(freesurfer_dir, subject_id): - if op.exists(withsub): - return Path(withsub) - return None - - - -class _PrepareSynthStripGridInputSpec(BaseInterfaceInputSpec): - input_image = File(exists=True, mandatory=True) - - -class _PrepareSynthStripGridOutputSpec(TraitedSpec): - prepared_image = File(exists=True) - - -class PrepareSynthStripGrid(SimpleInterface): - input_spec = _PrepareSynthStripGridInputSpec - output_spec = _PrepareSynthStripGridOutputSpec - - def _run_interface(self, runtime): - out_fname = fname_presuffix( - self.inputs.input_image, - newpath=runtime.cwd, - suffix="_SynthStripGrid.nii", - use_ext=False, - ) - self._results["prepared_image"] = out_fname - - # Zero-pad the image so every spatial dimension is a multiple of 64.
img = nb.load(self.inputs.input_image) - if not img.ndim == 3: - raise Exception("3D inputs are required for SynthStrip") - xvoxels, yvoxels, zvoxels = img.shape - - def get_padding(nvoxels): - extra_slices = nvoxels % 64 - if extra_slices == 0: - return 0 - complete_64s = nvoxels // 64 - return 64 * (complete_64s + 1) - nvoxels - - def split_padding(padding): - halfpad = padding // 2 - return halfpad, halfpad + padding % 2 - - spad = get_padding(zvoxels) - rpad, lpad = split_padding(get_padding(xvoxels)) - apad, ppad = split_padding(get_padding(yvoxels)) - - zeropad = Zeropad( - S=spad, - R=rpad, - L=lpad, - A=apad, - P=ppad, - in_files=self.inputs.input_image, - out_file=out_fname, - ) - - _ = zeropad.run() - assert op.exists(out_fname) - return runtime - - -class _SynthStripInputSpec(FSTraitedSpecOpenMP): - input_image = File(argstr="-i %s", exists=True, mandatory=True) - no_csf = traits.Bool(argstr="--no-csf", desc="Exclude CSF from brain border.") - border = traits.Int(argstr="-b %d", desc="Mask border threshold in mm. Default is 1.") - gpu = traits.Bool(argstr="-g") - out_brain = File( - argstr="-o %s", - name_template="%s_brain.nii.gz", - name_source=["input_image"], - keep_extension=False, - desc="skull stripped image with corrupt sform", - ) - out_brain_mask = File( - argstr="-m %s", - name_template="%s_mask.nii.gz", - name_source=["input_image"], - keep_extension=False, - desc="mask image with corrupt sform", - ) - - -class _SynthStripOutputSpec(TraitedSpec): - out_brain = File(exists=True) - out_brain_mask = File(exists=True) - - -class SynthStrip(FSCommandOpenMP): - input_spec = _SynthStripInputSpec - output_spec = _SynthStripOutputSpec - _cmd = "mri_synthstrip" - - def _num_threads_update(self): - if self.inputs.num_threads: - self.inputs.environ.update({"OMP_NUM_THREADS": "1"}) - - -class FixHeaderSynthStrip(SynthStrip): - - def _run_interface(self, runtime, correct_return_codes=(0,)): - # Run normally - runtime = super(FixHeaderSynthStrip, self)._run_interface(runtime, correct_return_codes) - - outputs = self._list_outputs() - if not op.exists(outputs["out_brain"]): - raise Exception("mri_synthstrip failed!") - - if outputs.get("out_brain_mask"): - _copyxform(self.inputs.input_image, outputs["out_brain_mask"]) - - _copyxform(self.inputs.input_image, outputs["out_brain"]) - - return runtime - - -class _SynthSegInputSpec(FSTraitedSpecOpenMP): - input_image = File(argstr="--i %s", exists=True, mandatory=True) - num_threads = traits.Int( - default=1, argstr="--threads %d", usedefault=True, desc="Number of threads to use" - ) - fast = traits.Bool(argstr="--fast", desc="fast predictions (lower quality).") - robust = traits.Bool(argstr="--robust", desc="use robust predictions (slower).") - out_seg = File( - argstr="--o %s", - name_template="%s_aseg.nii.gz", - name_source=["input_image"], - keep_extension=False, - desc="segmentation image", - ) - out_post = File( - argstr="--post %s", - name_template="%s_post.nii.gz", - name_source=["input_image"], - keep_extension=False, - desc="posteriors image", - ) - out_qc = File( - argstr="--qc %s", - name_template="%s_qc.csv", - name_source=["input_image"], - keep_extension=False, - desc="qc csv", - ) - cpu = traits.Bool( - True, argstr="--cpu", usedefault=True, desc="Enforce running with CPU rather than GPU."
- ) - - -class _SynthSegOutputSpec(TraitedSpec): - out_seg = File(exists=True) - out_post = File(exists=True) - out_qc = File(exists=True) - - -class SynthSeg(FSCommandOpenMP): - input_spec = _SynthSegInputSpec - output_spec = _SynthSegOutputSpec - _cmd = "mri_synthseg" - - def _format_arg(self, name, trait_spec, value): - # Hardcode threads to be 1 - if name == "num_threads": - return "--threads 1" - return super()._format_arg(name, trait_spec, value) - - def _num_threads_update(self): - if self.inputs.num_threads: - self.inputs.environ.update({"OMP_NUM_THREADS": "1"}) diff --git a/qsirecon/interfaces/gradients.py b/qsirecon/interfaces/gradients.py index 48f4c6f2..2be923b2 100644 --- a/qsirecon/interfaces/gradients.py +++ b/qsirecon/interfaces/gradients.py @@ -1,34 +1,21 @@ """Handle merging and spliting of DSI files.""" import logging -import os import nibabel as nb import numpy as np -import pandas as pd -from dipy.core.geometry import normalized_vector -from dipy.reconst.dti import decompose_tensor -from dipy.sims.voxel import all_tensor_evecs from nilearn import image as nim -from nipype.interfaces import ants -from nipype.interfaces.ants.resampling import ApplyTransformsInputSpec from nipype.interfaces.base import ( BaseInterfaceInputSpec, File, - InputMultiObject, - OutputMultiObject, SimpleInterface, TraitedSpec, isdefined, traits, ) from nipype.utils.filemanip import fname_presuffix -from scipy.spatial.transform import Rotation as R -from sklearn.metrics import r2_score -from transforms3d.affines import decompose44 LOGGER = logging.getLogger("nipype.interface") -tensor_index = {"xx": (0, 0), "xy": (0, 1), "xz": (0, 2), "yy": (1, 1), "yz": (1, 2), "zz": (2, 2)} class RemoveDuplicatesInputSpec(BaseInterfaceInputSpec): @@ -119,177 +106,6 @@ def is_unique_sample(vec): return runtime -class SliceQCInputSpec(BaseInterfaceInputSpec): - uncorrected_dwi_files = InputMultiObject(File(exists=True), desc="uncorrected dwi files") - ideal_image_files = InputMultiObject(File(exists=True), desc="model-based images") - mask_image = File(exists=True, desc="brain mask") - impute_slice_threshold = traits.Float(0.0, desc="threshold for using imputed data in a slice") - min_slice_size_percentile = traits.CFloat( - 10.0, - usedefault=True, - desc="slices bigger than " "this percentile are candidates for imputation.", - ) - - -class SliceQCOutputSpec(TraitedSpec): - imputed_images = OutputMultiObject(File(exists=True), desc="dwi files with imputed slices") - slice_stats = File(exists=True, desc="npy file with the slice-by-TR error matrix") - - -class SliceQC(SimpleInterface): - input_spec = SliceQCInputSpec - output_spec = SliceQCOutputSpec - - def _run_interface(self, runtime): - ideal_image_files = self.inputs.ideal_image_files - uncorrected_image_files = self.inputs.uncorrected_dwi_files - - self._results["imputed_images"] = self.inputs.uncorrected_dwi_files - output_npz = os.path.join(runtime.cwd, "slice_stats.npz") - mask_img = nb.load(self.inputs.mask_image) - mask = mask_img.get_fdata() > 0 - masked_slices = (mask * np.arange(mask_img.shape[2])[np.newaxis, np.newaxis, :]).astype( - int - ) - slice_nums, slice_counts = np.unique(masked_slices[mask], return_counts=True) - min_size = np.percentile(slice_counts, self.inputs.min_slice_size_percentile) - too_small = slice_nums[slice_counts < min_size] - for small_slice in too_small: - masked_slices[masked_slices == small_slice] = 0 - valid_slices = slice_nums[slice_counts > min_size] - valid_slices = valid_slices[valid_slices > 0] - slice_scores = [] - 
wb_xcorrs = [] - wb_r2s = [] - # If impute slice threshold==0 or hmc_model=="none" - if isdefined(ideal_image_files): - for ideal_image, input_image in zip(ideal_image_files, uncorrected_image_files): - slices, wb_xcorr, wb_r2 = _score_slices( - ideal_image, input_image, masked_slices, valid_slices - ) - slice_scores.append(slices) - wb_xcorrs.append(wb_xcorr) - wb_r2s.append(wb_r2) - else: - num_trs = len(uncorrected_image_files) - num_slices = mask_img.shape[2] - wb_xcorrs = np.zeros(num_trs) - wb_r2s = np.zeros(num_trs) - slice_scores = np.zeros((num_slices, num_trs)) - - np.savez( - output_npz, - slice_scores=slice_scores, - wb_r2s=np.array(wb_r2s), - wb_xcorrs=np.array(wb_xcorrs), - valid_slices=valid_slices, - masked_slices=masked_slices, - slice_nums=slice_nums, - slice_counts=slice_counts, - ) - self._results["slice_stats"] = output_npz - return runtime - - -def _score_slices(ideal_image, input_image, masked_slices, valid_slices): - """Compute similarity metrics on a pair of images.""" - - def crosscor(vec1, vec2): - v1bar = vec1 - vec1.mean() - v2bar = vec2 - vec2.mean() - return np.inner(v1bar, v2bar) ** 2 / (np.inner(v1bar, v1bar) * np.inner(v2bar, v2bar)) - - slice_scores = np.zeros(valid_slices.shape) - ideal_data = nb.load(ideal_image).get_fdata() - input_data = nb.load(input_image).get_fdata() - for nslice, slicenum in enumerate(valid_slices): - slice_mask = masked_slices == slicenum - ideal_slice = ideal_data[slice_mask] - data_slice = input_data[slice_mask] - slice_scores[nslice] = crosscor(ideal_slice, data_slice) - - global_mask = masked_slices > 0 - wb_ideal = ideal_data[global_mask] - wb_input = input_data[global_mask] - global_xcorr = crosscor(wb_input, wb_ideal) - global_r2 = r2_score(wb_input, wb_ideal) - return slice_scores, global_xcorr, global_r2 - - -class CombineMotionsInputSpec(BaseInterfaceInputSpec): - transform_files = InputMultiObject( - File(exists=True), mandatory=True, desc="transform files from hmc" - ) - source_files = InputMultiObject(File(exists=True), mandatory=True, desc="Moving images") - ref_file = File(exists=True, mandatory=True, desc="Fixed Image") - - -class CombineMotionsOutputSpec(TraitedSpec): - motion_file = File(exists=True) - spm_motion_file = File(exists=True) - - -class CombineMotions(SimpleInterface): - input_spec = CombineMotionsInputSpec - output_spec = CombineMotionsOutputSpec - - def _run_interface(self, runtime): - collected_motion = [] - output_fname = os.path.join(runtime.cwd, "motion_params.csv") - output_spm_fname = os.path.join(runtime.cwd, "spm_movpar.txt") - ref_file = self.inputs.ref_file - for motion_file in self.inputs.transform_files: - collected_motion.append( - get_fsl_motion_params(motion_file, ref_file, ref_file, runtime.cwd) - ) - - final_motion = np.row_stack(collected_motion) - cols = [ - "scaleX", - "scaleY", - "scaleZ", - "shearXY", - "shearXZ", - "shearYZ", - "rotateX", - "rotateY", - "rotateZ", - "shiftX", - "shiftY", - "shiftZ", - ] - motion_df = pd.DataFrame(data=final_motion, columns=cols) - motion_df.to_csv(output_fname, index=False) - self._results["motion_file"] = output_fname - - spmcols = motion_df[["shiftX", "shiftY", "shiftZ", "rotateX", "rotateY", "rotateZ"]] - self._results["spm_motion_file"] = output_spm_fname - np.savetxt(output_spm_fname, spmcols.values) - - return runtime - - -class MatchTransformsInputSpec(BaseInterfaceInputSpec): - b0_indices = traits.List(mandatory=True) - dwi_files = InputMultiObject(File(exists=True), mandatory=True) - transforms = InputMultiObject(File(exists=True),
mandatory=True) - - -class MatchTransformsOutputSpec(TraitedSpec): - transforms = OutputMultiObject(File(exists=True), mandatory=True) - - -class MatchTransforms(SimpleInterface): - input_spec = MatchTransformsInputSpec - output_spec = MatchTransformsOutputSpec - - def _run_interface(self, runtime): - self._results["transforms"] = match_transforms( - self.inputs.dwi_files, self.inputs.transforms, self.inputs.b0_indices - ) - return runtime - - class ExtractB0sInputSpec(BaseInterfaceInputSpec): - b0_indices = traits.List() - bval_file = File(exists=True) @@ -337,388 +153,6 @@ def _run_interface(self, runtime): - return runtime - - -class ComposeTransformsInputSpec(ApplyTransformsInputSpec): - input_image = File(mandatory=False) - dwi_files = InputMultiObject(File(exists=True), mandatory=True, desc="list of dwi files") - reference_image = File(exists=True, mandatory=True, desc="output grid") - # Transforms to apply - hmc_affines = InputMultiObject(File(exists=True), desc="head motion correction affines") - fieldwarps = InputMultiObject( - File(exists=True), mandatory=False, desc="SDC unwarping transform" - ) - b0_to_intramodal_template_transforms = InputMultiObject( - File(exists=True), - mandatory=False, - desc="list of transforms to register the b=0 to " "the intramodal template.", - ) - intramodal_template_to_t1_affine = File( - exists=True, desc="affine from the intramodal template to t1" - ) - intramodal_template_to_t1_warp = File( - exists=True, desc="warp from the intramodal template to t1" - ) - hmcsdc_dwi_ref_to_t1w_affine = File(exists=True, desc="affine from dwi ref to t1w") - t1_2_mni_forward_transform = InputMultiObject( - File(exists=True), mandatory=False, desc="composite (h5) transform to mni" - ) - save_cmd = traits.Bool( - True, usedefault=True, desc="write a log of command lines that were applied" - ) - copy_dtype = traits.Bool(False, usedefault=True, desc="copy dtype from inputs to outputs") - num_threads = traits.Int(1, usedefault=True, nohash=True, desc="number of parallel processes") - transforms = File(mandatory=False) - - -class ComposeTransformsOutputSpec(TraitedSpec): - out_warps = OutputMultiObject(File(exists=True), desc="composed all transforms to output_grid") - out_affines = OutputMultiObject( - File(exists=True), desc="composed affine-only transforms to output_grid" - ) - transform_lists = OutputMultiObject( - traits.List(File(exists=True)), desc="lists of transforms for each image" - ) - log_cmdline = File(desc="a list of command lines used to apply transforms") - - -class ComposeTransforms(SimpleInterface): - input_spec = ComposeTransformsInputSpec - output_spec = ComposeTransformsOutputSpec - - def _run_interface(self, runtime): - dwi_files = self.inputs.dwi_files - num_dwis = len(dwi_files) - - # concatenate transformations to get into the desired output space - image_transforms = [] - image_transform_names = [] - - def include_transform(transform): - if not isdefined(transform): - return False - LOGGER.info("Including %s", transform) - return len(transform) == num_dwis - - hmc_affines = self.inputs.hmc_affines - fieldwarps = self.inputs.fieldwarps - if isdefined(fieldwarps): - if len(fieldwarps) == 1: - LOGGER.info("using a single fieldwarp for all DWI files") - fieldwarps = fieldwarps * num_dwis - elif len(fieldwarps) == num_dwis: - LOGGER.info("using DRBUDDI warps!") - else: - LOGGER.info("No Fieldwarps will be used") - - # The affine transform to the t1 can come from hmcsdc or the intramodal template - coreg_to_t1 = traits.Undefined - if
isdefined(self.inputs.intramodal_template_to_t1_affine): - if isdefined(self.inputs.hmcsdc_dwi_ref_to_t1w_affine): - LOGGER.warning("Two b0 to t1 transforms are provided: using intramodal") - coreg_to_t1 = self.inputs.intramodal_template_to_t1_affine - else: - coreg_to_t1 = self.inputs.hmcsdc_dwi_ref_to_t1w_affine - if isdefined(coreg_to_t1): - coreg_to_t1 = [coreg_to_t1] * num_dwis - - # Handle transforms to intramodal transforms - intramodal_transforms = self.inputs.b0_to_intramodal_template_transforms - intramodal_affine = traits.Undefined - intramodal_warp = traits.Undefined - if isdefined(intramodal_transforms): - intramodal_affine = [intramodal_transforms[0]] * num_dwis - if len(intramodal_transforms) == 2: - intramodal_warp = [intramodal_transforms[1]] * num_dwis - elif len(intramodal_transforms) > 2: - raise Exception("Unsupported intramodal template transform") - - # If an intramodal template to t1 affine is present, copy for each dwi - intramodal_template_to_t1_affine = self.inputs.intramodal_template_to_t1_affine - if isdefined(intramodal_template_to_t1_affine): - intramodal_template_to_t1_affine = [intramodal_template_to_t1_affine] * num_dwis - - # If an intramodal template to t1 warp is present, copy for each dwi - intramodal_template_to_t1_warp = self.inputs.intramodal_template_to_t1_warp - if isdefined(intramodal_template_to_t1_warp): - intramodal_template_to_t1_warp = [intramodal_template_to_t1_warp] * num_dwis - - transform_order = [ - (hmc_affines, "hmc"), - (fieldwarps, "fieldwarp"), - (intramodal_affine, "to b=0 affine"), - (intramodal_warp, "to b=0 warp"), - (coreg_to_t1, "b=0 to T1w"), - ] - - for transform_list, transform_name in transform_order: - LOGGER.info(transform_name) - if include_transform(transform_list): - image_transforms.append(transform_list) - image_transform_names.append(transform_name) - - # Same t1-to-mni transform for every image - mni_xform = self.inputs.t1_2_mni_forward_transform - if isdefined(mni_xform): - assert len(mni_xform) == 2 - image_transforms.append([mni_xform[0]] * num_dwis) - image_transforms.append([mni_xform[1]] * num_dwis) - image_transform_names += ["mni affine", "mni warp"] - - # Check that all the transform lists have the same numbers of transforms - assert all( - [len(xform_list) == len(image_transforms[0]) for xform_list in image_transforms] - ) - - # If there is just a coreg transform, then we have everything - if image_transform_names == ["b=0 to T1w"]: - self._results["out_warps"] = image_transforms[0] - self._results["out_affines"] = image_transforms[0] - self._results["transform_lists"] = image_transforms - return runtime - - # Reverse the order for ANTs - image_transforms = image_transforms[::-1] - - # List of lists, one list per input file - xfms_list = [] - for image_num in range(num_dwis): - xfms_list.append([xfm[image_num] for xfm in image_transforms]) - - LOGGER.info("Composing %s transforms", " -> ".join(image_transform_names)) - - # Get all inputs from the ApplyTransforms object - ifargs = self.inputs.get() - - # Extract number of input images and transforms - # Get number of parallel jobs - num_threads = ifargs.pop("num_threads") - save_cmd = ifargs.pop("save_cmd") - - # Remove certain keys - for key in [ - "environ", - "ignore_exception", - "print_out_composite_warp_file", - "terminal_output", - "output_image", - "input_image", - "transforms", - "dwi_files", - "original_b0_indices", - "hmc_affines", - "b0_to_intramodal_template_transforms", - "intramodal_template_to_t1_affine",
"intramodal_template_to_t1_warp", - "fieldwarps", - "hmcsdc_dwi_ref_to_t1w_affine", - "interpolation", - "t1_2_mni_forward_transform", - "copy_dtype", - ]: - ifargs.pop(key, None) - - # In qsirecon the transforms have already been merged - assert len(xfms_list) == num_dwis - self._results["transform_lists"] = xfms_list - - # Inputs are ready to run in parallel - if num_threads < 1: - num_threads = None - - if num_threads == 1: - out_files = [ - _compose_tfms((in_file, in_xfm, ifargs, i, runtime.cwd)) - for i, (in_file, in_xfm) in enumerate(zip(dwi_files, xfms_list)) - ] - else: - from concurrent.futures import ThreadPoolExecutor - - with ThreadPoolExecutor(max_workers=num_threads) as pool: - mapper = pool.map( - _compose_tfms, - [ - (in_file, in_xfm, ifargs, i, runtime.cwd) - for i, (in_file, in_xfm) in enumerate(zip(dwi_files, xfms_list)) - ], - ) - out_files = list(mapper) - - # Collect output file names, after sorting by index - self._results["out_warps"] = [el[0] for el in out_files] - self._results["out_affines"] = [el[2] for el in out_files] - - if save_cmd: - self._results["log_cmdline"] = os.path.join(runtime.cwd, "command.txt") - with open(self._results["log_cmdline"], "w") as cmdfile: - print( - "\n-------\n".join(["\n-------\n".join([el[1], el[3]]) for el in out_files]), - file=cmdfile, - ) - return runtime - - -class GradientRotationInputSpec(BaseInterfaceInputSpec): - affine_transforms = InputMultiObject(File(exists=True), desc="ITK affine transforms") - original_images = InputMultiObject( - File(exists=True), desc="NIfTI images corresponding to bvals, bvecs" - ) - bvec_files = InputMultiObject( - File(exists=True), - desc="list of split bvec files, must correspond to a " - "non-oblique image/reference frame.", - mandatory=True, - ) - bval_files = InputMultiObject( - File(exists=True), desc="list of split bval files", mandatory=True - ) - - -class GradientRotationOutputSpec(TraitedSpec): - bvals = File(exists=True) - bvecs = File(exists=True) - log_cmdline = File(exists=True) - - -class GradientRotation(SimpleInterface): - """Reorient gradients accordint to transorms.""" - - input_spec = GradientRotationInputSpec - output_spec = GradientRotationOutputSpec - - def _run_interface(self, runtime): - out_root = os.path.join(runtime.cwd, "rotated") - - # Simple concatenation of bvals - bval_fname = out_root + ".bval" - concatenate_bvals(self.inputs.bval_files, bval_fname) - self._results["bvals"] = bval_fname - - bvec_fname = out_root + ".bvec" - bvecs = concatenate_bvecs(self.inputs.bvec_files) - commands = bvec_rotation(bvecs, self.inputs.affine_transforms, bvec_fname, runtime) - self._results["bvecs"] = bvec_fname - - self._results["log_cmdline"] = os.path.join(runtime.cwd, "command.txt") - with open(self._results["log_cmdline"], "w") as cmdfile: - print("\n-------\n".join(commands), file=cmdfile) - return runtime - - -class LocalGradientRotationInputSpec(GradientRotationInputSpec): - warp_transforms = InputMultiObject(File(exists=True), desc="Warps") - mask_image = File(exists=True, desc="brain mask in the output space") - bvec_files = InputMultiObject(File(exists=True), desc="list of split bvec files") - - -class LocalGradientRotationOutputSpec(TraitedSpec): - local_bvecs = File(exists=True) - log_cmdline = File(exists=True) - - -class LocalGradientRotation(SimpleInterface): - input_spec = LocalGradientRotationInputSpec - output_spec = LocalGradientRotationOutputSpec - - def _run_interface(self, runtime): - out_root = os.path.join(runtime.cwd, "rotated") - # Create the local 
bvecs - local_bvec_fname = out_root + "_local_bvecs.nii.gz" - self._results["local_bvecs"] = local_bvec_fname - original_bvecs = concatenate_bvecs(self.inputs.bvec_files) - commands = local_bvec_rotation( - original_bvecs, - self.inputs.warp_transforms, - self.inputs.mask_image, - runtime, - local_bvec_fname, - ) - self._results["log_cmdline"] = os.path.join(runtime.cwd, "command.txt") - with open(self._results["log_cmdline"], "w") as cmdfile: - print("\n-------\n".join(commands[1]), file=cmdfile) - return runtime - - -def get_fsl_motion_params(itk_file, src_file, ref_file, working_dir): - tmp_fsl_file = fname_presuffix(itk_file, newpath=working_dir, suffix="_FSL.xfm", use_ext=False) - fsl_convert_cmd = ( - "c3d_affine_tool " - "-ref {ref_file} " - "-src {src_file} " - "-itk {itk_file} " - "-ras2fsl -o {fsl_file}".format( - src_file=src_file, ref_file=ref_file, itk_file=itk_file, fsl_file=tmp_fsl_file - ) - ) - os.system(fsl_convert_cmd) - - def get_measures(line): - line = line.strip().split() - return np.array([float(num) for num in line[-3:]]) - - def get_image_center(src_fname): - # returns image center in mm - src_img = nb.load(src_fname) - src_aff = src_img.affine - src_center = (np.array(src_img.shape) - 1) / 2 - src_center_mm = nb.affines.apply_affine(src_aff, src_center) - src_offsets = src_aff[0:3, 3] - src_center_mm -= src_offsets - return src_center_mm - - def get_trans_from_offset(image_center, rotmat): - # offset[0] = trans[0] + center[0] - [rot[0,0]*center[0] - # +rot[0,1]*center[1] + rot[0,2]*center[2]] - trans = np.zeros((3,)) - offsets = rotmat[0:3, 3] - for i in range(3): - offpart = offsets[i] - image_center[i] - rotpart = ( - rotmat[i, 0] * image_center[0] - + rotmat[i, 1] * image_center[1] - + rotmat[i, 2] * image_center[2] - ) - trans[i] = offpart + rotpart - return trans - - img_center = get_image_center(src_file) - c3d_out_xfm = np.loadtxt(fname=tmp_fsl_file, dtype="float") - [T, Rotmat, Z, S] = decompose44(c3d_out_xfm) - T = get_trans_from_offset(img_center, c3d_out_xfm) - - flip = np.array([1, -1, -1]) - negflip = np.array([-1, 1, 1]) - Rotmat_to_convert = R.from_matrix(Rotmat) - Rotvec = Rotmat_to_convert.as_rotvec() - - rotation = Rotvec * negflip - translation = T * flip - scale = Z - shear = S - - return np.concatenate([scale, shear, rotation, translation]) - - -def match_transforms(dwi_files, transforms, b0_indices): - original_b0_indices = np.array(b0_indices) - num_dwis = len(dwi_files) - num_transforms = len(transforms) - - if num_dwis == num_transforms: - return transforms - - # Do sanity checks - if not len(transforms) == len(b0_indices): - raise Exception("number of transforms does not match number of b0 images") - - # Create a list of which hmc affines go with each of the split images - nearest_affines = [] - for index in range(num_dwis): - nearest_b0_num = np.argmin(np.abs(index - original_b0_indices)) - this_transform = transforms[nearest_b0_num] - nearest_affines.append(this_transform) - - return nearest_affines - - def concatenate_bvals(bval_list, out_file): """Create an FSL-style bvals file from split bval files.""" collected_vals = [] @@ -742,249 +176,3 @@ def concatenate_bvecs(input_files): if not stacked.shape[1] == 3: stacked = stacked.T return stacked - - -def write_concatenated_fsl_gradients(bval_files, bvec_files, out_prefix): - bvec_file = out_prefix + ".bvec" - bval_file = out_prefix + ".bval" - stacked_bvecs = concatenate_bvecs(bvec_files) - np.savetxt(bvec_file, stacked_bvecs.T, fmt="%.8f", delimiter=" ") - 
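match_transforms above assigns each DWI volume the head-motion affine of its nearest b=0 volume. A self-contained sketch of that assignment rule, using toy indices rather than qsirecon's interfaces:

    import numpy as np

    def nearest_b0_assignment(num_dwis, b0_indices):
        """Pick the index of the closest b=0 volume for every DWI volume."""
        b0_indices = np.asarray(b0_indices)
        return [int(np.argmin(np.abs(i - b0_indices))) for i in range(num_dwis)]

    # With b=0 volumes at positions 0 and 5 in a 10-volume series, volumes 0-2
    # borrow the first b=0 transform and volumes 3-9 borrow the second one.
    assert nearest_b0_assignment(10, [0, 5]) == [0, 0, 0, 1, 1, 1, 1, 1, 1, 1]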
concatenate_bvals(bval_files, bval_file) - return bval_file, bvec_file - - -def bvec_rotation(ortho_bvecs, transforms, output_file, runtime): - """Rotate bvecs using antsApplyTransformsToPoints and antsTransformInfo. - - Parameters: - ----------- - - ortho_bvecs: np.ndarray (n, 3) - bvecs relative to a non-oblique output volume - - transforms: list - List of transform files that will be applied to the vectors - - original_images: list - List of images that correspond to the original bvecs. Used to - rotate the bvecs to world coordinates reference frame. - - output_file: str - Path to write the new bvec file - - runtime: runtime object - Nipype node runtime object - - """ - aattp_rotated = [] - commands = [] - for bvec, transform in zip(ortho_bvecs, transforms): - vec, cmd = aattp_rotate_vec(bvec, transform, runtime) - aattp_rotated.append(vec) - commands.append(cmd) - rotated_vecs = np.row_stack(aattp_rotated) - np.savetxt(output_file, rotated_vecs.T, fmt=str("%.8f")) - return commands - - -def aattp_rotate_vec(orig_vec, transform, runtime): - if (orig_vec**2).sum() == 0: - return orig_vec, "b0: No rotation" - - orig_txt = fname_presuffix( - transform, suffix="_pre_rotation.csv", newpath=runtime.cwd, use_ext=False - ) - rotated_txt = fname_presuffix( - transform, suffix="_post_rotation.csv", newpath=runtime.cwd, use_ext=False - ) - - # Save it for ants - with open(orig_txt, "w") as bvec_txt: - bvec_txt.write("x,y,z,t\n0.0,0.0,0.0,0.0\n") - bvec_txt.write(",".join(map(str, 5 * orig_vec)) + ",0.0\n") - - def unit_vector(vector): - """The unit vector of the vector.""" - return vector / np.linalg.norm(vector) - - # Only use the affine transforms for global bvecs - # Reverse order and inverse to antsApplyTransformsToPoints - transforms = "--transform [%s, 1]" % transform - cmd = ( - "antsApplyTransformsToPoints --dimensionality 3 --input " - + orig_txt - + " --output " - + rotated_txt - + " " - + transforms - ) - LOGGER.info(cmd) - os.system(cmd) - rotated_vec = np.loadtxt(rotated_txt, skiprows=1, delimiter=",")[:, :3] - rotated_unit_vec = unit_vector(rotated_vec[1] - rotated_vec[0]) - - return rotated_unit_vec, cmd - - -def _compose_tfms(args): - """Create a composite transform from inputs.""" - in_file, in_xform, ifargs, index, newpath = args - out_file = fname_presuffix( - in_file, suffix="_xform-%05d" % index, newpath=newpath, use_ext=True - ) - - xfm = ants.ApplyTransforms( - input_image=in_file, - transforms=in_xform, - output_image=out_file, - print_out_composite_warp_file=True, - interpolation="LanczosWindowedSinc", - **ifargs, - ) - xfm.terminal_output = "allatonce" - xfm.resource_monitor = False - runtime = xfm.run().runtime - LOGGER.info(runtime.cmdline) - - # Force floating point precision - nii = nb.load(out_file, mmap=False) - nii.set_data_dtype(np.dtype("float32")) - nii.to_filename(out_file) - - # Get just the affine Transforms - affines = [transform for transform in in_xform if ".nii" not in transform] - out_affine = fname_presuffix( - in_file, suffix="_affine_xform-%05d.mat" % index, newpath=newpath, use_ext=False - ) - affine_file, affine_cmd = compose_affines(ifargs["reference_image"], affines, out_affine) - - return (out_file, runtime.cmdline, affine_file, affine_cmd) - - -def compose_affines(reference_image, affine_list, output_file): - """Use antsApplyTransforms to get a single affine from multiple affines.""" - cmd = "antsApplyTransforms -d 3 -r %s -o Linear[%s] " % (reference_image, output_file) - cmd += " ".join(["--transform %s" % trf for trf in affine_list]) - 
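aattp_rotate_vec above rotates a gradient direction by pushing two points through the transform and renormalizing their difference. For a purely affine transform this reduces to applying the 3x3 rotation block and renormalizing; the sketch below shows only the underlying math and is not a drop-in replacement for the ANTs point-mapping call, whose inversion and coordinate conventions matter in practice:

    import numpy as np

    def rotate_bvec(bvec, rotation):
        """Rotate a unit gradient vector by a 3x3 rotation matrix."""
        bvec = np.asarray(bvec, dtype=float)
        if not bvec.any():
            return bvec  # b=0 volumes carry no direction
        rotated = rotation @ bvec
        return rotated / np.linalg.norm(rotated)

    # A 90 degree rotation about z sends x to y
    rot_z = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    assert np.allclose(rotate_bvec([1.0, 0.0, 0.0], rot_z), [0.0, 1.0, 0.0])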
LOGGER.info(cmd) - os.system(cmd) - if not os.path.exists(output_file): - LOGGER.critical(cmd) - assert False - return output_file, cmd - - -def create_tensor_image(mask_img, direction, prefix): - """set intent as NIFTI_INTENT_SYMMATRIX (1005), - [dxx, dxy, dyy, dxz, dyz, dzz] are the components - info from here - https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software - """ - out_fname = prefix + "_tensor.nii" - evecs = all_tensor_evecs(direction) - evals = np.diag([1.0, 0.5, 0.05]) - tensor = np.linalg.multi_dot([evecs, evals, evecs.T]) - - temp_components = [] - for direction in ["xx", "xy", "xz", "yy", "yz", "zz"]: - this_component = prefix + "_temp_dtiComp_%s.nii.gz" % direction - LOGGER.info("writing %s", this_component) - nb.Nifti1Image( - mask_img.get_fdata() * tensor[tensor_index[direction]], - mask_img.affine, - mask_img.header, - ).to_filename(this_component) - temp_components.append(this_component) - - compose_cmd = "ImageMath 3 %s ComponentTo3DTensor %s" % (out_fname, prefix + "_temp_dtiComp_") - LOGGER.info(compose_cmd) - os.system(compose_cmd) - for temp_component in temp_components: - os.remove(temp_component) - - return out_fname - - -def reorient_tensor_image(tensor_image, warp_file, mask_img, prefix, output_fname): - cmds = [] - to_remove = [] - reoriented_tensor_fname = prefix + "reoriented_tensor.nii" - reorient_cmd = "ReorientTensorImage 3 %s %s %s" % ( - tensor_image, - reoriented_tensor_fname, - warp_file, - ) - LOGGER.info(reorient_cmd) - os.system(reorient_cmd) - cmds.append(reorient_cmd) - to_remove.append(reoriented_tensor_fname) - - # Load the reoriented tensor and get the principal directions out - reoriented_dt_img = nb.load(reoriented_tensor_fname) - reoriented_tensor_data = reoriented_dt_img.get_fdata().squeeze() - - mask_data = mask_img.get_fdata() > 0 - output_data = np.zeros(mask_img.shape + (3,)) - - reoriented_tensors = reoriented_tensor_data[mask_data] - reoriented_vectors = np.zeros((reoriented_tensors.shape[0], 3)) - - def tensor_from_vec(vec): - """[dxx, dxy, dyy, dxz, dyz, dzz].""" - return np.array( - [[vec[0], vec[1], vec[3]], [vec[1], vec[2], vec[4]], [vec[3], vec[4], vec[5]]] - ) - - for nrow, row in enumerate(reoriented_tensors): - row_tensor = tensor_from_vec(row) - evals, evecs = decompose_tensor(row_tensor) - reoriented_vectors[nrow] = evecs[:, 0] - - output_data[mask_data] = normalized_vector(reoriented_vectors) - vector_data = get_vector_nii(output_data, mask_img.affine, mask_img.header) - vector_data.to_filename(output_fname) - os.remove(reoriented_tensor_fname) - os.remove(tensor_image) - return output_fname, reorient_cmd - - -def get_vector_nii(data, affine, header): - hdr = header.copy() - hdr.set_data_dtype(np.dtype(" 0 - label_sum = wm_mask.sum() - if label_sum < 30: - raise Exception("Very little white matter found: %d voxels" % wm_mask.sum()) - data = np.zeros(nii.shape, dtype=np.uint8) - data[wm_mask] = 1 - new = nb.Nifti1Image(data, nii.affine, nii.header) - new.set_data_dtype(np.uint8) - new.to_filename(out_file) - self._results["out"] = out_file - return runtime - - -class SplitDWIsBvalsInputSpec(BaseInterfaceInputSpec): - split_files = InputMultiObject(desc="pre-split DWI images") - bvec_file = File(desc="the bvec file") - bval_file = File(desc="the bval file") - deoblique_bvecs = traits.Bool(False, usedefault=True, desc="write LPS+ world coordinate bvecs") - b0_threshold = traits.Int( - 50, usedefault=True, desc="Maximum b-value that can be considered a b0" - ) - - -class 
SplitDWIsBvalsOutputSpec(TraitedSpec): - bval_files = OutputMultiObject(File(exists=True), desc="single volume bvals") - bvec_files = OutputMultiObject(File(exists=True), desc="single volume bvecs") - b0_images = OutputMultiObject(File(exists=True), desc="just the b0s") - b0_indices = traits.List(desc="list of original indices for each b0 image") - - -class SplitDWIsBvals(SimpleInterface): - input_spec = SplitDWIsBvalsInputSpec - output_spec = SplitDWIsBvalsOutputSpec - - def _run_interface(self, runtime): - - split_bval_files, split_bvec_files = split_bvals_bvecs( - self.inputs.bval_file, - self.inputs.bvec_file, - self.inputs.split_files, - self.inputs.deoblique_bvecs, - runtime.cwd, - ) - - bvalues = np.loadtxt(self.inputs.bval_file) - b0_indices = np.flatnonzero(bvalues < self.inputs.b0_threshold) - b0_paths = [self.inputs.split_files[idx] for idx in b0_indices] - self._results["bval_files"] = split_bval_files - self._results["bvec_files"] = split_bvec_files - self._results["b0_images"] = b0_paths - self._results["b0_indices"] = b0_indices.tolist() - - return runtime - - -class SplitDWIsFSLInputSpec(BaseInterfaceInputSpec): - dwi_file = File(desc="the dwi image") - bvec_file = File(desc="the bvec file") - bval_file = File(desc="the bval file") - deoblique_bvecs = traits.Bool(False, usedefault=True, desc="write LPS+ world coordinate bvecs") - b0_threshold = traits.Int( - 50, usedefault=True, desc="Maximum b-value that can be considered a b0" - ) - - -class SplitDWIsFSLOutputSpec(TraitedSpec): - dwi_files = OutputMultiObject(File(exists=True), desc="single volume dwis") - bval_files = OutputMultiObject(File(exists=True), desc="single volume bvals") - bvec_files = OutputMultiObject(File(exists=True), desc="single volume bvecs") - b0_images = OutputMultiObject(File(exists=True), desc="just the b0s") - b0_indices = traits.List(desc="list of original indices for each b0 image") - - -class SplitDWIsFSL(SimpleInterface): - input_spec = SplitDWIsFSLInputSpec - output_spec = SplitDWIsFSLOutputSpec - - def _run_interface(self, runtime): - split = fsl.Split(dimension="t", in_file=self.inputs.dwi_file) - split_dwi_files = split.run().outputs.out_files - - split_bval_files, split_bvec_files = split_bvals_bvecs( - self.inputs.bval_file, - self.inputs.bvec_file, - split_dwi_files, - self.inputs.deoblique_bvecs, - runtime.cwd, - ) - - bvalues = np.loadtxt(self.inputs.bval_file) - b0_indices = np.flatnonzero(bvalues < self.inputs.b0_threshold) - b0_paths = [split_dwi_files[idx] for idx in b0_indices] - self._results["dwi_files"] = split_dwi_files - self._results["bval_files"] = split_bval_files - self._results["bvec_files"] = split_bvec_files - self._results["b0_images"] = b0_paths - self._results["b0_indices"] = b0_indices.tolist() - - return runtime - - -def _flatten(in_list): - out_list = [] - for item in in_list: - if isinstance(item, (list, tuple)): - out_list.extend(item) - else: - out_list.append(item) - return out_list - - -class IntraModalMergeInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiObject(File(exists=True), mandatory=True, desc="input files") - hmc = traits.Bool(True, usedefault=True) - zero_based_avg = traits.Bool(True, usedefault=True) - to_lps = traits.Bool(True, usedefault=True) - - -class IntraModalMergeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="merged image") - out_avg = File(exists=True, desc="average image") - out_mats = OutputMultiObject(File(exists=True), desc="output matrices") - out_movpar = OutputMultiObject(File(exists=True), desc="output 
movement parameters") - - -class IntraModalMerge(SimpleInterface): - input_spec = IntraModalMergeInputSpec - output_spec = IntraModalMergeOutputSpec - - def _run_interface(self, runtime): - fsl_check = os.environ.get("FSL_BUILD") - if fsl_check == "no_fsl": - raise Exception( - """Container in use does not have FSL. To use this workflow, - please download the qsirecon container with FSL installed.""" - ) - from nipype.interfaces import fsl - - in_files = self.inputs.in_files - if not isinstance(in_files, list): - in_files = [self.inputs.in_files] - - # Generate output average name early - self._results["out_avg"] = fname_presuffix( - self.inputs.in_files[0], suffix="_avg", newpath=runtime.cwd - ) - - if self.inputs.to_lps: - in_files = [reorient(inf, newpath=runtime.cwd) for inf in in_files] - - if len(in_files) == 1: - filenii = nb.load(in_files[0]) - filedata = filenii.get_fdata() - - # magnitude files can have an extra dimension empty - if filedata.ndim == 5: - sqdata = np.squeeze(filedata) - if sqdata.ndim == 5: - raise RuntimeError("Input image (%s) is 5D" % in_files[0]) - else: - in_files = [ - fname_presuffix(in_files[0], suffix="_squeezed", newpath=runtime.cwd) - ] - nb.Nifti1Image(sqdata, filenii.affine, filenii.header).to_filename(in_files[0]) - - if np.squeeze(nb.load(in_files[0]).get_fdata()).ndim < 4: - self._results["out_file"] = in_files[0] - self._results["out_avg"] = in_files[0] - # TODO: generate identity out_mats and zero-filled out_movpar - return runtime - in_files = in_files[0] - else: - magmrg = fsl.Merge(dimension="t", in_files=self.inputs.in_files) - in_files = magmrg.run().outputs.merged_file - mcflirt = fsl.MCFLIRT( - cost="normcorr", save_mats=True, save_plots=True, ref_vol=0, in_file=in_files - ) - mcres = mcflirt.run() - self._results["out_mats"] = mcres.outputs.mat_file - self._results["out_movpar"] = mcres.outputs.par_file - self._results["out_file"] = mcres.outputs.out_file - - hmcnii = nb.load(mcres.outputs.out_file) - hmcdat = hmcnii.get_fdata().mean(axis=3) - if self.inputs.zero_based_avg: - hmcdat -= hmcdat.min() - - nb.Nifti1Image(hmcdat, hmcnii.affine, hmcnii.header).to_filename(self._results["out_avg"]) - - return runtime - - -CONFORMATION_TEMPLATE = """\t\t
<h3 class="elem-title">Anatomical Conformation</h3>
-\t\t<ul class="elem-desc">
-\t\t\t<li>Input T1w images: {n_t1w}</li>
-\t\t\t<li>Output orientation: LPS</li>
-\t\t\t<li>Output dimensions: {dims}</li>
-\t\t\t<li>Output voxel size: {zooms}</li>
-\t\t\t<li>Discarded images: {n_discards}</li>
-{discard_list}
-\t\t</ul>
-"""
-
-DISCARD_TEMPLATE = """\t\t\t\t<li><abbr title="{path}">{basename}</abbr></li>"""
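For reference, the two templates are filled with str.format when the reportlet is built; a sketch with hypothetical values:

    discards = ["sub-01_run-02_T1w.nii.gz"]  # hypothetical discarded image
    discard_list = "\n".join(
        DISCARD_TEMPLATE.format(path="/data/" + name, basename=name) for name in discards
    )
    html = CONFORMATION_TEMPLATE.format(
        n_t1w=2,
        dims="256x256x180",
        zooms="1.0mm x 1.0mm x 1.0mm",
        n_discards=len(discards),
        discard_list=discard_list,
    )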
-
-
-class ConformInputSpec(BaseInterfaceInputSpec):
-    in_file = File(mandatory=True, desc="Input image")
-    target_zooms = traits.Tuple(
-        traits.Float, traits.Float, traits.Float, desc="Target zoom information"
-    )
-    target_shape = traits.Tuple(
-        traits.Int, traits.Int, traits.Int, desc="Target shape information"
-    )
-    deoblique_header = traits.Bool(False, usedefault=True)
-
-
-class ConformOutputSpec(TraitedSpec):
-    out_file = File(exists=True, desc="Conformed image")
-    transform = File(exists=True, desc="Conformation transform")
-    # report = File(exists=True, desc='reportlet about orientation')
-
-
-class Conform(SimpleInterface):
-    """Conform a series of T1w images to enable merging.
-
-    Performs two basic functions:
-
-    1. Orient to LPS (right-left, anterior-posterior, inferior-superior)
-    2. Resample to target zooms (voxel sizes) and shape (number of voxels)
-
-    """
-
-    input_spec = ConformInputSpec
-    output_spec = ConformOutputSpec
-
-    def _run_interface(self, runtime):
-        # Load image, orient as LPS
-        fname = self.inputs.in_file
-        orig_img = nb.load(fname)
-        reoriented = to_lps(orig_img)
-
-        # Set target shape information
-        target_zooms = np.array(self.inputs.target_zooms)
-        target_shape = np.array(self.inputs.target_shape)
-        target_span = target_shape * target_zooms
-
-        zooms = np.array(reoriented.header.get_zooms()[:3])
-        shape = np.array(reoriented.shape[:3])
-
-        # Reconstruct transform from orig to reoriented image
-        ornt_xfm = nb.orientations.inv_ornt_aff(
-            nb.io_orientation(reoriented.affine), orig_img.shape
-        )
-        # Identity unless proven otherwise
-        target_affine = reoriented.affine.copy()
-        conform_xfm = np.eye(4)
-        # conform_xfm = np.diag([-1, -1, 1, 1])
-
-        xyz_unit = reoriented.header.get_xyzt_units()[0]
-        if xyz_unit == "unknown":
-            # Common assumption; if we're wrong, unlikely to be the only thing that breaks
-            xyz_unit = "mm"
-
-        # Set a small tolerance below which zoom differences do not trigger rescaling
-        atol = {"meter": 1e-5, "mm": 0.01, "micron": 10}[xyz_unit]
-
-        # Rescale => change zooms
-        # Resize => update image dimensions
-        rescale = not np.allclose(zooms, target_zooms, atol=atol)
-        resize = not np.all(shape == target_shape)
-        if rescale or resize:
-            if rescale:
-                scale_factor = target_zooms / zooms
-                target_affine[:3, :3] = reoriented.affine[:3, :3].dot(np.diag(scale_factor))
-
-            if resize:
-                # The shift is applied after scaling.
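The rescale branch above changes the voxel size by scaling the affine's direction columns while leaving the rotation alone. A toy sketch with a hypothetical axis-aligned 2 mm image rescaled to 1 mm:

    import numpy as np

    affine = np.diag([2.0, 2.0, 2.0, 1.0])  # 2 mm isotropic input
    scale_factor = np.array([1.0, 1.0, 1.0]) / np.array([2.0, 2.0, 2.0])
    affine[:3, :3] = affine[:3, :3].dot(np.diag(scale_factor))
    new_zooms = np.sqrt((affine[:3, :3] ** 2).sum(axis=0))  # column norms
    assert np.allclose(new_zooms, 1.0)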
- # Use a proportional shift to maintain relative position in dataset - size_factor = target_span / (zooms * shape) - # Use integer shifts to avoid unnecessary interpolation - offset = reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3] - target_affine[:3, 3] = reoriented.affine[:3, 3] + offset.astype(int) - - data = nli.resample_img(reoriented, target_affine, target_shape).get_fdata() - conform_xfm = np.linalg.inv(reoriented.affine).dot(target_affine) - reoriented = reoriented.__class__(data, target_affine, reoriented.header) - - if self.inputs.deoblique_header: - is_oblique = np.any(np.abs(nb.affines.obliquity(reoriented.affine)) > 0) - if is_oblique: - LOGGER.warning("Removing obliquity from image affine") - new_affine = reoriented.affine.copy() - new_affine[:, :-1] = 0 - new_affine[(0, 1, 2), (0, 1, 2)] = reoriented.header.get_zooms()[:3] * np.sign( - reoriented.affine[(0, 1, 2), (0, 1, 2)] - ) - reoriented = nb.Nifti1Image(reoriented.get_fdata(), new_affine, reoriented.header) - - # Image may be reoriented, rescaled, and/or resized - if reoriented is not orig_img: - out_name = fname_presuffix(fname, suffix="_lps", newpath=runtime.cwd) - reoriented.to_filename(out_name) - transform = ornt_xfm.dot(conform_xfm) - if not np.allclose(orig_img.affine.dot(transform), target_affine): - LOGGER.warning("Check alignment of anatomical image.") - - else: - out_name = fname - transform = np.eye(4) - - mat_name = fname_presuffix(fname, suffix=".mat", newpath=runtime.cwd, use_ext=False) - np.savetxt(mat_name, transform, fmt="%.08f") - self._results["transform"] = mat_name - self._results["out_file"] = out_name - - return runtime - - class ConformDwiInputSpec(BaseInterfaceInputSpec): dwi_file = File(mandatory=True, desc="dwi image") bval_file = File(exists=True) @@ -475,35 +121,6 @@ def _run_interface(self, runtime): return runtime -class _ChooseInterpolatorInputSpec(BaseInterfaceInputSpec): - dwi_files = InputMultiObject(File(exists=True), mandatory=True) - output_resolution = traits.Float(mandatory=True) - - -class _ChooseInterpolatorOutputSpec(TraitedSpec): - interpolation_method = traits.Enum("LanczosWindowedSinc", "BSpline") - - -class ChooseInterpolator(SimpleInterface): - """If the requested output resolution is more than 10% smaller than the input, use BSpline.""" - - input_spec = _ChooseInterpolatorInputSpec - output_spec = _ChooseInterpolatorOutputSpec - - def _run_interface(self, runtime): - output_resolution = np.array([self.inputs.output_resolution] * 3) - interpolator = "LanczosWindowedSinc" - for input_file in self.inputs.dwi_files: - resolution_cutoff = 0.9 * np.array(nb.load(input_file).header.get_zooms()[:3]) - print(output_resolution, resolution_cutoff) - if np.any(output_resolution < resolution_cutoff): - interpolator = "BSpline" - LOGGER.warning("Using BSpline interpolation for upsampling") - break - self._results["interpolation_method"] = interpolator - return runtime - - class ValidateImageOutputSpec(TraitedSpec): out_file = File(exists=True, desc="validated image") out_report = File(exists=True, desc="HTML segment containing warning") @@ -658,48 +275,6 @@ def bvec_to_rasb(bval_file, bvec_file, img_file, workdir): return np.fromstring(out, dtype=float, sep=" ")[:3] -def split_bvals_bvecs(bval_file, bvec_file, img_files, deoblique, working_dir): - """Split bvals and bvecs into one text file per image.""" - if deoblique: - LOGGER.info("Converting oblique-image bvecs to world coordinate reference frame") - bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file) - 
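split_bvals_bvecs writes one single-value bval file and one 3-vector bvec file per volume. A minimal sketch of that layout with hypothetical file names, assuming an FSL-convention 3xN bvec file on disk:

    import numpy as np

    bvals = np.loadtxt("dwi.bval")    # shape (N,)
    bvecs = np.loadtxt("dwi.bvec").T  # FSL stores 3 rows; transpose to (N, 3)
    for nsample, (bval, bvec) in enumerate(zip(bvals, bvecs)):
        np.savetxt("bval_%04d.txt" % nsample, np.atleast_1d(bval))
        np.savetxt("bvec_%04d.txt" % nsample, np.atleast_1d(bvec))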
split_bval_files = [] - split_bvec_files = [] - for nsample, (bval, bvec, img_file) in enumerate(zip(bvals[:, None], bvecs, img_files)): - bval_fname = fname_presuffix(bval_file, suffix="_%04d" % nsample, newpath=working_dir) - bvec_suffix = "_ortho_%04d" % nsample if not deoblique else "_%04d" % nsample - bvec_fname = fname_presuffix(bvec_file, bvec_suffix, newpath=working_dir) - np.savetxt(bval_fname, bval) - np.savetxt(bvec_fname, bvec) - - # re-write the bvec deobliqued, if requested - if deoblique: - rasb = bvec_to_rasb(bval_fname, bvec_fname, img_file, working_dir) - # Convert to image axis orientation - ornt = nb.aff2axcodes(nb.load(img_file).affine) - flippage = np.array([1 if ornt[n] == "RAS"[n] else -1 for n in [0, 1, 2]]) - deobliqued_bvec = rasb * flippage - np.savetxt(bvec_fname, deobliqued_bvec) - - split_bval_files.append(bval_fname) - split_bvec_files.append(bvec_fname) - - return split_bval_files, split_bvec_files - - -def reorient(in_file, newpath=None): - """Reorient Nifti files to LPS.""" - out_file = fname_presuffix(in_file, suffix="_lps", newpath=newpath) - to_lps(nb.load(in_file)).to_filename(out_file) - return out_file - - -def reorient_to(in_file, orientation="LPS", newpath=None): - out_file = fname_presuffix(in_file, suffix="_" + orientation, newpath=newpath) - to_lps(in_file, tuple(orientation)).to_filename(out_file) - return out_file - - def to_lps(input_img, new_axcodes=("L", "P", "S")): if isinstance(input_img, str): input_img = nb.load(input_img) @@ -716,56 +291,3 @@ def to_lps(input_img, new_axcodes=("L", "P", "S")): return reoriented_img else: return input_img - - -class TSplitInputSpec(AFNICommandInputSpec): - in_file = File( - desc="input file to 3dTsplit4D", - argstr=" %s", - position=-1, - mandatory=True, - copyfile=False, - ) - out_name = File( - mandatory=True, - desc="output image file name", - argstr="-prefix %s.nii", - ) - digits = traits.Int( - argstr="-digits %d", desc="Number of digits to include in split file names" - ) - - -class TSplitOutputSpec(TraitedSpec): - out_files = OutputMultiPath(File(exists=True)) - - -class TSplit(AFNICommand): - """Converts a 3D + time dataset into multiple 3D volumes (one volume per file). - For complete details, see the `3dTsplit4D Documentation. - `_ - """ - - _cmd = "3dTsplit4D" - input_spec = TSplitInputSpec - output_spec = TSplitOutputSpec - - def _list_outputs(self): - """Create a Bunch which contains all possible files generated - by running the interface. Some files are always generated, others - depending on which ``inputs`` options are set. - Returns - ------- - outputs : Bunch object - Bunch object containing all possible files generated by - interface object. 
- If None, file was not generated - Else, contains path, filename of generated outputfile - """ - outputs = self._outputs().get() - outputs["out_files"] = sorted( - glob.glob( - os.path.join(os.getcwd(), "{outname}.**.nii".format(outname=self.inputs.out_name)) - ) - ) - return outputs diff --git a/qsirecon/interfaces/itk.py b/qsirecon/interfaces/itk.py deleted file mode 100644 index 778e6dd9..00000000 --- a/qsirecon/interfaces/itk.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -ITK files handling -~~~~~~~~~~~~~~~~~~ - - -""" -import os -import os.path as op -import subprocess -from mimetypes import guess_type - -import nibabel as nb -import numpy as np -import SimpleITK as sitk -from dipy.core import geometry as geom -from nipype import logging -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - InputMultiObject, - OutputMultiObject, - SimpleInterface, - TraitedSpec, - traits, -) -from nipype.utils.filemanip import fname_presuffix -from niworkflows.viz.utils import compose_view - -from ..viz.utils import plot_acpc - -LOGGER = logging.getLogger("nipype.interface") - - -class _AffineToRigidInputSpec(BaseInterfaceInputSpec): - affine_transform = InputMultiObject(File(exists=True, mandatory=True)) - - -class _AffineToRigidOutputSpec(TraitedSpec): - rigid_transform = traits.List(File(exists=True)) - rigid_transform_inverse = traits.List(File(exists=True)) - translation_transform = traits.List(File(exists=True)) - - -class AffineToRigid(SimpleInterface): - input_spec = _AffineToRigidInputSpec - output_spec = _AffineToRigidOutputSpec - - def _run_interface(self, runtime): - if len(self.inputs.affine_transform) > 1: - raise Exception("Only one transform allowed") - affine_transform = self.inputs.affine_transform[0] - rigid_itk, rigid_itk_inverse, translation_itk = itk_affine_to_rigid( - affine_transform, runtime.cwd - ) - self._results["rigid_transform"] = [rigid_itk] - self._results["rigid_transform_inverse"] = [rigid_itk_inverse] - self._results["translation_transform"] = [translation_itk] - return runtime - - -class _ACPCReportInputSpec(BaseInterfaceInputSpec): - translation_image = File(exists=True, desc="only translated to ACPC", mandatory=True) - rigid_image = File(exists=True, desc="rigid transformed to ACPC") - - -class _ACPCReportOutputSpec(TraitedSpec): - out_report = File(exists=True) - - -class ACPCReport(SimpleInterface): - input_spec = _ACPCReportInputSpec - output_spec = _ACPCReportOutputSpec - - def _run_interface(self, runtime): - out_report = runtime.cwd + "/ACPCReport.svg" - # Call composer - compose_view( - plot_acpc( - nb.load(self.inputs.translation_image), - "moving-image", - estimate_brightness=True, - label="Original", - compress=False, - ), - plot_acpc( - nb.load(self.inputs.rigid_image), - "fixed-image", - estimate_brightness=True, - label="AC-PC", - compress=False, - ), - out_file=out_report, - ) - self._results["out_report"] = out_report - - return runtime - - -class DisassembleTransformInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="ANTs composite transform (h5)") - - -class DisassembleTransformOutputSpec(TraitedSpec): - out_transforms = OutputMultiObject(File(exists=True)) - - -class DisassembleTransform(SimpleInterface): - """Sloppy interface to split h5 transforms to a warp and an affine.""" - - input_spec = DisassembleTransformInputSpec - output_spec 
= DisassembleTransformOutputSpec - - def _run_interface(self, runtime): - transforms = disassemble_transform(self.inputs.in_file, runtime.cwd) - self._results["out_transforms"] = transforms - return runtime - - -def _applytfms(args): - """ - Applies ANTs' antsApplyTransforms to the input image. - All inputs are zipped in one tuple to make it digestible by - multiprocessing's map - """ - import nibabel as nb - from nipype.utils.filemanip import fname_presuffix - from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms - - in_file, in_xform, ifargs, index, newpath = args - out_file = fname_presuffix( - in_file, suffix="_xform-%05d" % index, newpath=newpath, use_ext=True - ) - - copy_dtype = ifargs.pop("copy_dtype", False) - xfm = ApplyTransforms( - input_image=in_file, transforms=in_xform, output_image=out_file, **ifargs - ) - xfm.terminal_output = "allatonce" - xfm.resource_monitor = False - runtime = xfm.run().runtime - - if copy_dtype: - nii = nb.load(out_file) - in_dtype = nb.load(in_file).get_data_dtype() - - # Overwrite only iff dtypes don't match - if in_dtype != nii.get_data_dtype(): - nii.set_data_dtype(in_dtype) - nii.to_filename(out_file) - - return (out_file, runtime.cmdline) - - -def _arrange_xfms(transforms, num_files, tmp_folder): - """ - Convenience method to arrange the list of transforms that should be applied - to each input file. Not needed in qsirecon - """ - base_xform = ["#Insight Transform File V1.0", "#Transform 0"] - # Initialize the transforms matrix - xfms_T = [] - for i, tf_file in enumerate(transforms): - # If it is a deformation field, copy to the tfs_matrix directly - if guess_type(tf_file)[0] != "text/plain": - xfms_T.append([tf_file] * num_files) - continue - - with open(tf_file) as tf_fh: - tfdata = tf_fh.read().strip() - - # If it is not an ITK transform file, copy to the tfs_matrix directly - if not tfdata.startswith("#Insight Transform File"): - xfms_T.append([tf_file] * num_files) - continue - - # Count number of transforms in ITK transform file - nxforms = tfdata.count("#Transform") - - # Remove first line - tfdata = tfdata.split("\n")[1:] - - # If it is a ITK transform file with only 1 xform, copy to the tfs_matrix directly - if nxforms == 1: - xfms_T.append([tf_file] * num_files) - continue - - if nxforms != num_files: - raise RuntimeError( - "Number of transforms (%d) found in the ITK file does not match" - " the number of input image files (%d)." 
% (nxforms, num_files) - ) - - # At this point splitting transforms will be necessary, generate a base name - out_base = fname_presuffix( - tf_file, suffix="_pos-%03d_xfm-{:05d}" % i, newpath=tmp_folder.name - ).format - # Split combined ITK transforms file - split_xfms = [] - for xform_i in range(nxforms): - # Find start token to extract - startidx = tfdata.index("#Transform %d" % xform_i) - next_xform = base_xform + tfdata[startidx + 1 : startidx + 4] + [""] - xfm_file = out_base(xform_i) - with open(xfm_file, "w") as out_xfm: - out_xfm.write("\n".join(next_xform)) - split_xfms.append(xfm_file) - xfms_T.append(split_xfms) - - # Transpose back (only Python 3) - return list(map(list, zip(*xfms_T))) - - -def disassemble_transform(transform_file, cwd): - cmd = ["CompositeTransformUtil", "--disassemble", transform_file, "disassemble"] - affine_out = cwd + "/00_disassemble_AffineTransform.mat" - warp_out = cwd + "/01_disassemble_DisplacementFieldTransform.nii.gz" - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - LOGGER.info(" ".join(cmd)) - out, err = proc.communicate() - - if not op.exists(affine_out): - raise Exception("unable to unpack composite transform") - transforms = [affine_out] - if op.exists(warp_out): - transforms.append(warp_out) - return transforms - - -def compose_affines(reference_image, affine_list, output_file): - """Use antsApplyTransforms to get a single affine from multiple affines.""" - cmd = "antsApplyTransforms -d 3 -r %s -o Linear[%s, 1] " % (reference_image, output_file) - cmd += " ".join(["--transform %s" % trf for trf in affine_list]) - os.system(cmd) - assert os.path.exists(output_file) - return output_file - - -def itk_affine_to_rigid(transform_file, cwd): - """uses c3d_affine_tool and FSL's aff2rigid to convert an itk linear - transform from affine to rigid""" - - rigid_mat_file = cwd + "/6DOFrigid.mat" - translation_mat_file = cwd + "/translation.mat" - inverse_mat_file = cwd + "/6DOFinverse.mat" - raw_transform = sitk.ReadTransform(transform_file) - aff_transform = sitk.AffineTransform(3) - aff_transform.SetFixedParameters(raw_transform.GetFixedParameters()) - aff_transform.SetParameters(raw_transform.GetParameters()) - - full_matrix = np.eye(4) - full_matrix[:3, :3] = np.array(aff_transform.GetMatrix()).reshape((3, 3), order="C") - _, _, angles, _, _ = geom.decompose_matrix(full_matrix) - rot_mat = geom.euler_matrix(angles[0], angles[1], angles[2]) - - rigid = sitk.Euler3DTransform() - rigid.SetCenter(aff_transform.GetCenter()) - rigid.SetTranslation(aff_transform.GetTranslation()) - # Write a translation-only transform - sitk.WriteTransform(rigid, translation_mat_file) - # Write the full rigid (translation + rotation) transform - rigid.SetMatrix(tuple(rot_mat[:3, :3].flatten(order="C"))) - sitk.WriteTransform(rigid, rigid_mat_file) - # Write the inverse rigid transform - sitk.WriteTransform(rigid.GetInverse(), inverse_mat_file) - - if False in ( - op.exists(rigid_mat_file), - op.exists(translation_mat_file), - op.exists(inverse_mat_file), - ): - raise Exception("unable to create rigid AC-PC transform") - return rigid_mat_file, inverse_mat_file, translation_mat_file diff --git a/qsirecon/interfaces/mrtrix.py b/qsirecon/interfaces/mrtrix.py index a3a625c3..36622123 100644 --- a/qsirecon/interfaces/mrtrix.py +++ b/qsirecon/interfaces/mrtrix.py @@ -13,11 +13,9 @@ import zipfile from copy import deepcopy -import nibabel as nb import nipype.interfaces.utility as niu import nipype.pipeline.engine as pe import numpy as np -from 
nilearn.image import load_img, threshold_img from nipype import logging from nipype.interfaces.base import ( BaseInterfaceInputSpec, @@ -37,16 +35,8 @@ from nipype.interfaces.mrtrix3.tracking import Tractography, TractographyInputSpec from nipype.interfaces.mrtrix3.utils import Generate5ttInputSpec from nipype.utils.filemanip import fname_presuffix, split_filename, which -from niworkflows.viz.utils import compose_view, cuts_from_bbox from scipy.io.matlab import loadmat, savemat -from ..viz.utils import plot_denoise -from .denoise import ( - SeriesPreprocReport, - SeriesPreprocReportInputSpec, - SeriesPreprocReportOutputSpec, -) - LOGGER = logging.getLogger("nipype.interface") RC3_ROOT = which("average_response") # Only exists in RC3 if RC3_ROOT is not None: @@ -155,74 +145,6 @@ def _run_interface(self, runtime): return runtime -class DWIDenoiseInputSpec(MRTrix3BaseInputSpec, SeriesPreprocReportInputSpec): - in_file = File(exists=True, argstr="%s", position=-2, mandatory=True, desc="input DWI image") - mask = File(exists=True, argstr="-mask %s", position=1, desc="mask image") - extent = traits.Tuple( - (traits.Int, traits.Int, traits.Int), - argstr="-extent %d,%d,%d", - desc="set the window size of the denoising filter. (default = 5,5,5)", - ) - noise_image = File( - argstr="-noise %s", - name_template="%s_noise.nii.gz", - name_source=["in_file"], - keep_extension=False, - desc="the output noise map", - ) - out_file = File( - name_template="%s_denoised.nii.gz", - name_source=["in_file"], - keep_extension=False, - argstr="%s", - position=-1, - desc="the output denoised DWI image", - ) - out_report = File( - "dwidenoise_report.svg", usedefault=True, desc="filename for the visual report" - ) - - -class DWIDenoiseOutputSpec(SeriesPreprocReportOutputSpec): - noise_image = File(desc="the output noise map", exists=True) - out_file = File(desc="the output denoised DWI image", exists=True) - - -class DWIDenoise(SeriesPreprocReport, MRTrix3Base): - """ - Denoise DWI data and estimate the noise level based on the optimal - threshold for PCA. - - DWI data denoising and noise map estimation by exploiting data redundancy - in the PCA domain using the prior knowledge that the eigenspectrum of - random covariance matrices is described by the universal Marchenko Pastur - distribution. - - Important note: image denoising must be performed as the first step of the - image processing pipeline. The routine will fail if interpolation or - smoothing has been applied to the data prior to denoising. - - Note that this function does not correct for non-Gaussian noise biases. 
- - For more information, see - - - """ - - _cmd = "dwidenoise" - input_spec = DWIDenoiseInputSpec - output_spec = DWIDenoiseOutputSpec - - def _get_plotting_images(self): - input_dwi = load_img(self.inputs.in_file) - outputs = self._list_outputs() - ref_name = outputs.get("out_file") - denoised_nii = load_img(ref_name) - noise_name = outputs["noise_image"] - noisenii = load_img(noise_name) - return input_dwi, denoised_nii, noisenii - - class GenerateMasked5ttInputSpec(Generate5ttInputSpec): algorithm = traits.Enum( "fsl", @@ -1148,184 +1070,6 @@ def _rebids(name): return bidsified[:-1] -class DWIBiasCorrectInputSpec(MRTrix3BaseInputSpec, SeriesPreprocReportInputSpec): - in_file = File(exists=True, argstr="%s", position=-2, mandatory=True, desc="input DWI image") - mask = File(argstr="-mask %s", desc="input mask image for bias field estimation") - method = traits.Enum("ants", "fsl", argstr="%s", position=1, usedefault=True) - bias_image = File( - argstr="-bias %s", - name_source="in_file", - name_template="%s_bias.nii.gz", - keep_extension=False, - desc="bias field", - ) - out_file = File( - name_source="in_file", - keep_extension=False, - argstr="%s", - name_template="%s_N4.nii.gz", - position=-1, - desc="the output bias corrected DWI image", - ) - ants_b = traits.Str(default_value="[150,3]", argstr="-ants.b %s", usedefault=True) - ants_c = traits.Str(default_value="[200x200,1e-6]", argstr="-ants.c %s", usedefault=True) - ants_s = traits.Str(default_value="4", argstr="-ants.s %s") - out_report = File("n4_report.svg", usedefault=True, desc="filename for the visual report") - bzero_max = traits.Int( - argstr="-config BZeroThreshold %d", - desc="Maximum b-value that can be considered a b=0", - ) - - -class DWIBiasCorrectOutputSpec(SeriesPreprocReportOutputSpec): - bias_image = File(desc="the output bias field", exists=True) - out_file = File(desc="the output bias corrected DWI image", exists=True) - - -class DWIBiasCorrect(SeriesPreprocReport, MRTrix3Base): - """ - Perform B1 field inhomogeneity correction for a DWI volume series. - For more information, see - - Example - ------- - >>> import nipype.interfaces.mrtrix3 as mrt - >>> bias_correct = mrt.DWIBiasCorrect() - >>> bias_correct.inputs.in_file = 'dwi.mif' - >>> bias_correct.inputs.method = 'ants' - >>> bias_correct.cmdline - 'dwibiascorrect ants dwi.mif dwi_biascorr.mif' - >>> bias_correct.run() # doctest: +SKIP - """ - - _cmd = "dwibiascorrect" - input_spec = DWIBiasCorrectInputSpec - output_spec = DWIBiasCorrectOutputSpec - - def _get_plotting_images(self): - input_dwi = load_img(self.inputs.in_file) - outputs = self._list_outputs() - ref_name = outputs.get("out_file") - denoised_nii = load_img(ref_name) - noise_name = outputs["bias_image"] - noisenii = load_img(noise_name) - return input_dwi, denoised_nii, noisenii - - -class MRDeGibbsInputSpec(MRTrix3BaseInputSpec, SeriesPreprocReportInputSpec): - out_report = File("degibbs_report.svg", usedefault=True, desc="filename for the visual report") - in_file = File(exists=True, argstr="%s", position=-2, mandatory=True, desc="input DWI image") - out_file = File( - name_source="in_file", - keep_extension=False, - argstr="%s", - name_template="%s_mrdegibbs.nii.gz", - position=-1, - desc="the output de-Gibbs'd DWI image", - ) - mask = File(desc="input mask image for the visual report") - nshifts = traits.Int( - default=20, argstr="-nshifts %d", desc="discretization of subpixel spacing." 
- ) - axes = traits.Enum( - "0,1", - "0,2", - "1,2", - default="0,1", - argstr="-axes %s", - desc="select the slice axes (default: 0,1 - i.e. x-y)", - ) - minw = traits.Int( - default=1, argstr="-minW %d", desc="left border of window used for TV computation" - ) - maxw = traits.Int( - default=3, argstr="-maxW %d", desc="right border of window used for TV computation" - ) - - -class MRDeGibbsOutputSpec(SeriesPreprocReportOutputSpec): - out_file = File(desc="the output de-Gibbs'd DWI image") - - -class MRDeGibbs(SeriesPreprocReport, MRTrix3Base): - input_spec = MRDeGibbsInputSpec - output_spec = MRDeGibbsOutputSpec - _cmd = "mrdegibbs" - - def _get_plotting_images(self): - input_dwi = load_img(self.inputs.in_file) - outputs = self._list_outputs() - ref_name = outputs.get("out_file") - denoised_nii = load_img(ref_name) - return input_dwi, denoised_nii, None - - def _generate_report(self): - """Generate a reportlet.""" - LOGGER.info("Generating denoising visual report") - - input_dwi, denoised_nii, _ = self._get_plotting_images() - - # find an image to use as the background - image_data = input_dwi.get_fdata() - image_intensities = np.array([img.mean() for img in image_data.T]) - lowb_index = int(np.argmax(image_intensities)) - highb_index = int(np.argmin(image_intensities)) - - # Original images - orig_lowb_nii = input_dwi.slicer[..., lowb_index] - orig_highb_nii = input_dwi.slicer[..., highb_index] - - # Denoised images - denoised_lowb_nii = denoised_nii.slicer[..., lowb_index] - denoised_highb_nii = denoised_nii.slicer[..., highb_index] - - # Find spatial extent of the image - contour_nii = mask_nii = None - if isdefined(self.inputs.mask): - contour_nii = load_img(self.inputs.mask) - else: - mask_nii = threshold_img(denoised_lowb_nii, 50) - cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts) - - diff_lowb_nii = nb.Nifti1Image( - orig_lowb_nii.get_fdata() - denoised_lowb_nii.get_fdata(), - affine=denoised_lowb_nii.affine, - ) - diff_highb_nii = nb.Nifti1Image( - orig_highb_nii.get_fdata() - denoised_highb_nii.get_fdata(), - affine=denoised_highb_nii.affine, - ) - - # Call composer - compose_view( - plot_denoise( - denoised_lowb_nii, - denoised_highb_nii, - "moving-image", - estimate_brightness=True, - cuts=cuts, - label="De-Gibbs", - lowb_contour=None, - highb_contour=None, - compress=False, - ), - plot_denoise( - diff_lowb_nii, - diff_highb_nii, - "fixed-image", - estimate_brightness=True, - cuts=cuts, - label="Estimated Ringing", - lowb_contour=None, - highb_contour=None, - compress=False, - ), - out_file=self._out_report, - ) - - self._calculate_nmse(input_dwi, denoised_nii) - - class _ITKTransformConvertInputSpec(CommandLineInputSpec): in_transform = traits.File(exists=True, argstr="%s", mandatory=True, position=0) operation = traits.Enum( @@ -1370,54 +1114,3 @@ class TransformHeader(CommandLine): input_spec = _TransformHeaderInputSpec output_spec = _TransformHeaderOutputSpec _cmd = "mrtransform -strides -1,-2,3" - - -class _PolarToComplexInputSpec(CommandLineInputSpec): - mag_file = traits.File(exists=True, mandatory=True, position=0, argstr="%s") - phase_file = traits.File(exists=True, mandatory=True, position=1, argstr="%s") - out_file = traits.File( - exists=False, - name_source="mag_file", - name_template="%s_complex.nii.gz", - keep_extension=False, - position=-1, - argstr="-polar %s", - ) - - -class _PolarToComplexOutputSpec(TraitedSpec): - out_file = File(exists=True) - - -class PolarToComplex(CommandLine): - """Convert a magnitude and phase image pair to a single 
complex image using mrcalc.""" - - input_spec = _PolarToComplexInputSpec - output_spec = _PolarToComplexOutputSpec - - _cmd = "mrcalc" - - -class _ComplexToMagnitudeInputSpec(CommandLineInputSpec): - complex_file = traits.File(exists=True, mandatory=True, position=0, argstr="%s") - out_file = traits.File( - exists=False, - name_source="complex_file", - name_template="%s_mag.nii.gz", - keep_extension=False, - position=-1, - argstr="-abs %s", - ) - - -class _ComplexToMagnitudeOutputSpec(TraitedSpec): - out_file = File(exists=True) - - -class ComplexToMagnitude(CommandLine): - """Extract the magnitude portion of a complex image using mrcalc.""" - - input_spec = _ComplexToMagnitudeInputSpec - output_spec = _ComplexToMagnitudeOutputSpec - - _cmd = "mrcalc" diff --git a/qsirecon/interfaces/nilearn.py b/qsirecon/interfaces/nilearn.py deleted file mode 100644 index ee7a5c7e..00000000 --- a/qsirecon/interfaces/nilearn.py +++ /dev/null @@ -1,481 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Image tools interfaces -~~~~~~~~~~~~~~~~~~~~~~ - - -""" -import os -from tempfile import NamedTemporaryFile - -import nibabel as nb -import numpy as np -from dipy.segment.threshold import otsu -from nilearn.image import concat_imgs, load_img, math_img, new_img_like -from nilearn.masking import _post_process_mask, compute_epi_mask -from nilearn.plotting import plot_epi -from nipype import logging -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - InputMultiPath, - SimpleInterface, - TraitedSpec, - isdefined, - traits, -) -from nipype.utils.filemanip import fname_presuffix -from scipy import ndimage -from scipy.ndimage.morphology import binary_fill_holes -from skimage import morphology as sim -from skimage.segmentation import watershed -from sklearn.preprocessing import power_transform, robust_scale - -LOGGER = logging.getLogger("nipype.interface") - - -class MaskEPIInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiPath(File(exists=True), mandatory=True, desc="input EPI or list of files") - lower_cutoff = traits.Float(0.2, usedefault=True) - upper_cutoff = traits.Float(0.85, usedefault=True) - connected = traits.Bool(True, usedefault=True) - enhance_t2 = traits.Bool(False, usedefault=True, desc="enhance T2 contrast on image") - opening = traits.Int(2, usedefault=True) - closing = traits.Bool(True, usedefault=True) - fill_holes = traits.Bool(True, usedefault=True) - exclude_zeros = traits.Bool(False, usedefault=True) - ensure_finite = traits.Bool(True, usedefault=True) - target_affine = traits.Either(None, traits.File(exists=True), default=None, usedefault=True) - target_shape = traits.Either(None, traits.File(exists=True), default=None, usedefault=True) - no_sanitize = traits.Bool(False, usedefault=True) - - -class MaskEPIOutputSpec(TraitedSpec): - out_mask = File(exists=True, desc="output mask") - - -class MaskEPI(SimpleInterface): - input_spec = MaskEPIInputSpec - output_spec = MaskEPIOutputSpec - - def _run_interface(self, runtime): - - in_files = self.inputs.in_files - - if self.inputs.enhance_t2: - in_files = [_enhance_t2_contrast(f, newpath=runtime.cwd) for f in in_files] - - masknii = compute_epi_mask( - in_files, - lower_cutoff=self.inputs.lower_cutoff, - upper_cutoff=self.inputs.upper_cutoff, - connected=self.inputs.connected, - opening=self.inputs.opening, - exclude_zeros=self.inputs.exclude_zeros, - ensure_finite=self.inputs.ensure_finite, - 
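The PolarToComplex and ComplexToMagnitude wrappers above pair mrcalc's -polar and -abs operators. Voxel-wise, the round trip is ordinary complex arithmetic; a sketch with hypothetical values:

    import numpy as np

    mag, phase = 340.0, 1.2               # hypothetical magnitude and phase (radians)
    cplx = mag * np.exp(1j * phase)       # what the -polar operator computes
    assert np.isclose(np.abs(cplx), mag)  # what the -abs operator recovers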
target_affine=self.inputs.target_affine, - target_shape=self.inputs.target_shape, - ) - - if self.inputs.closing: - closed = sim.binary_closing(masknii.get_fdata().astype(np.uint8), sim.ball(1)).astype( - np.uint8 - ) - masknii = masknii.__class__(closed, masknii.affine, masknii.header) - - if self.inputs.fill_holes: - filled = binary_fill_holes(masknii.get_fdata().astype(np.uint8), sim.ball(6)).astype( - np.uint8 - ) - masknii = masknii.__class__(filled, masknii.affine, masknii.header) - - if self.inputs.no_sanitize: - in_file = self.inputs.in_files - if isinstance(in_file, list): - in_file = in_file[0] - nii = nb.load(in_file) - qform, code = nii.get_qform(coded=True) - masknii.set_qform(qform, int(code)) - sform, code = nii.get_sform(coded=True) - masknii.set_sform(sform, int(code)) - - self._results["out_mask"] = fname_presuffix( - self.inputs.in_files[0], suffix="_mask", newpath=runtime.cwd - ) - masknii.to_filename(self._results["out_mask"]) - return runtime - - -class MergeInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiPath( - File(exists=True), mandatory=True, desc="input list of files to merge" - ) - dtype = traits.Enum( - "f4", - "f8", - "u1", - "u2", - "u4", - "i2", - "i4", - usedefault=True, - desc="numpy dtype of output image", - ) - header_source = File(exists=True, desc="a Nifti file from which the header should be copied") - compress = traits.Bool(True, usedefault=True, desc="Use gzip compression on .nii output") - is_dwi = traits.Bool(True, usedefault=True, desc="if True, negative values are set to zero") - - -class MergeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output merged file") - - -class Merge(SimpleInterface): - input_spec = MergeInputSpec - output_spec = MergeOutputSpec - - def _run_interface(self, runtime): - ext = ".nii.gz" if self.inputs.compress else ".nii" - self._results["out_file"] = fname_presuffix( - self.inputs.in_files[0], suffix="_merged" + ext, newpath=runtime.cwd, use_ext=False - ) - new_nii = concat_imgs(self.inputs.in_files, dtype=self.inputs.dtype) - - if isdefined(self.inputs.header_source): - src_hdr = nb.load(self.inputs.header_source).header - new_nii.header.set_xyzt_units(t=src_hdr.get_xyzt_units()[-1]) - new_nii.header.set_zooms( - list(new_nii.header.get_zooms()[:3]) + [src_hdr.get_zooms()[3]] - ) - if self.inputs.is_dwi: - new_nii = nb.Nifti1Image(np.abs(new_nii.get_fdata()), new_nii.affine, new_nii.header) - - new_nii.to_filename(self._results["out_file"]) - - return runtime - - -class _EnhanceB0InputSpec(BaseInterfaceInputSpec): - b0_file = File(exists=True, mandatory=True) - - -class _EnhanceB0OutputSpec(TraitedSpec): - mask_file = File(exists=True) - bias_corrected_file = File(exists=True) - enhanced_file = File(exists=True) - - -class EnhanceB0(SimpleInterface): - input_spec = _EnhanceB0InputSpec - output_spec = _EnhanceB0OutputSpec - - def _run_interface(self, runtime): - input_img = nb.squeeze_image(load_img(self.inputs.b0_file)) - bias_corrected, bias_img = biascorrect(input_img, cwd=runtime.cwd) - out_bias_corrected = fname_presuffix( - self.inputs.b0_file, suffix="_unbiased", newpath=runtime.cwd - ) - bias_corrected.to_filename(out_bias_corrected) - self._results["bias_corrected_file"] = out_bias_corrected - - # Sharpen the bias-corrected image - out_enhanced = fname_presuffix( - self.inputs.b0_file, suffix="_unbiasedsharpened", newpath=runtime.cwd - ) - enhanced = run_imagemath(bias_corrected, "Sharpen", [], cwd=runtime.cwd) - enhanced.to_filename(out_enhanced) - self._results["enhanced_file"] = 
out_enhanced - - return runtime - - -def _enhance_t2_contrast(in_file, newpath=None, offset=0.5): - """ - Performs a logarithmic transformation of intensity that - effectively splits brain and background and makes the - overall distribution more Gaussian. - """ - out_file = fname_presuffix(in_file, suffix="_t1enh", newpath=newpath) - nii = nb.load(in_file) - data = nii.get_fdata() - maxd = data.max() - newdata = np.log(offset + data / maxd) - newdata -= newdata.min() - newdata *= maxd / newdata.max() - nii = nii.__class__(newdata, nii.affine, nii.header) - nii.to_filename(out_file) - return out_file - - -def run_imagemath(nii, op, args, copy_input_header=True, cwd=None): - tmpf_in = NamedTemporaryFile(dir=cwd) - tmpf_out = NamedTemporaryFile(dir=cwd) - in_fname = tmpf_in.name + ".nii.gz" - out_fname = tmpf_out.name + ".nii.gz" - nii.to_filename(in_fname) - imath_cmd = ["ImageMath", "3", out_fname, op, in_fname] + args - os.system(" ".join(imath_cmd)) - new_img = load_img(out_fname) - tmpf_in.close() - tmpf_out.close() - if copy_input_header: - out_nii = nb.Nifti1Image(new_img.get_fdata(), nii.affine, nii.header) - else: - out_nii = new_img - return out_nii - - -def biascorrect(nii, copy_input_header=True, cwd=None): - tmpf_in = NamedTemporaryFile(dir=cwd) - tmpf_out = NamedTemporaryFile(dir=cwd) - in_fname = tmpf_in.name + ".nii.gz" - out_fname = tmpf_out.name + ".nii.gz" - out_bias_fname = tmpf_out.name + "_bias.nii.gz" - nii.to_filename(in_fname) - cmd = [ - "N3BiasFieldCorrection", - "3", - in_fname, - out_fname, - "4", - "none", - "50", - "4", - out_bias_fname, - ] - os.system(" ".join(cmd)) - new_img = load_img(out_fname) - bias_img = load_img(out_bias_fname) - tmpf_in.close() - tmpf_out.close() - if copy_input_header: - out_nii = nb.Nifti1Image(new_img.get_fdata(), nii.affine, nii.header) - out_bias = nb.Nifti1Image(bias_img.get_fdata(), nii.affine, nii.header) - else: - out_nii = new_img - out_bias = bias_img - return out_nii, out_bias - - -def calculate_gradmax_b0_mask(b0_nii, show_plot=False, quantile_max=0.8, pad_size=10, cwd=None): - """Robustly finds a brain mask from a low-res b=0 image. - - The steps for finding a mask for a b=0 image - - 1. Remove spiky outliers with a median filter - 2. Non-aggressively bias correct the image using N3 - 3. Calculate the magnitude of the spatial gradient - 4. Clip the intensity values and rescale them using a Box-Cox transform - 5. Calculate a foreground threshold using Otsu's Method - 6. Try a series of orders for opening. Select the order that maximizes the gradient - from (3) at the edge of the opened mask. 
- - **Returns** - - mask_nii: spatial image - binary gradient-optimizing mask - scaled_nii: spatial image - robust scaled image for brain extraction - gradient_nii: spatial image - gradient image - """ - total_voxels = np.prod(b0_nii.shape) - if pad_size: - padded_nii = run_imagemath( - b0_nii, "PadImage", [str(pad_size)], copy_input_header=False, cwd=cwd - ) - else: - padded_nii = b0_nii - - # First apply a median filter to the data - footprint = sim.cube(3) - data = padded_nii.get_fdata() - mask = data > 0 - median_filt = ndimage.median_filter(data, footprint=footprint) * mask - median_nii = new_img_like(padded_nii, median_filt) - bc_nii, _ = biascorrect(median_nii, cwd=cwd) - - # Calculate the gradient on the bias-corrected, median filtered image - grad_nii = run_imagemath(bc_nii, "Grad", ["0"], cwd=cwd) - grad_data = grad_nii.get_fdata() - - # Make an edge map - values = np.abs(data[mask].reshape(-1, 1)) - clipped = robust_scale(values, quantile_range=(0, quantile_max), with_centering=False) - scaled = np.clip( - power_transform(clipped, method="box-cox", standardize=False).squeeze(), 0, None - ) - cutoff = otsu(scaled) - binary = scaled > cutoff - data[mask] = binary - scaled_image = data.copy() - scaled_image[mask] = scaled - - # Make a distance-weighted gradient - maurer_abs = new_img_like( - padded_nii, - np.abs( - run_imagemath( - new_img_like(padded_nii, data), "MaurerDistance", [], cwd=cwd - ).get_fdata() - ), - ) - weighted_edges = math_img("1/(img+1)**2 * grad", img=maurer_abs, grad=grad_nii) - grad_data = weighted_edges.get_fdata() - - # Send it out for post processing - edge_scores = [] - opening_values = np.array([2, 4, 6, 8, 10, 12], dtype=int) - opened_masks = [] - selected_voxels = [] - for opening_test in opening_values: - processed_mask, _ = _post_process_mask(data, b0_nii.affine, opening=opening_test) - # Make a mask around the edge of the mask - dilated_mask = ndimage.binary_dilation(processed_mask) - eroded_mask = ndimage.binary_erosion(processed_mask) - mask_edge = dilated_mask ^ eroded_mask - opened_masks.append(processed_mask) - selected_voxels.append(processed_mask.sum() / total_voxels * 100) - # How many edges are captured by the mask edge? - edge_scores.append(grad_data[mask_edge].mean()) - - best_mask = np.argmax(edge_scores) - processed_mask = opened_masks[best_mask] - - if best_mask.sum() < 0.1 * total_voxels: - LOGGER.warning("Degenerate Mask case. 
Using compute_epi_mask") - epi_mask = compute_epi_mask(new_img_like(padded_nii, scaled_image)) - processed_mask = epi_mask.get_fdata().astype(np.uint8) - - if pad_size: - processed_mask = processed_mask[pad_size:-pad_size, pad_size:-pad_size, pad_size:-pad_size] - scaled_image = scaled_image[pad_size:-pad_size, pad_size:-pad_size, pad_size:-pad_size] - grad_data = grad_data[pad_size:-pad_size, pad_size:-pad_size, pad_size:-pad_size] - - mask_img = new_img_like(b0_nii, processed_mask) - scaled_img = new_img_like(b0_nii, scaled_image) - grad_img = new_img_like(b0_nii, grad_data) - if show_plot: - import matplotlib.pyplot as plt - - print("picked opening=", opening_values[best_mask]) - plot_epi(padded_nii, display_mode="z", cut_coords=10, title="Input Image") - plot_epi(median_nii, display_mode="z", cut_coords=10, title="Median Filtered") - plot_epi(bc_nii, display_mode="z", cut_coords=10, title="Bias Corrected") - fig, ax = plt.subplots(ncols=3) - ax[0].hist(scaled, bins=256) - ax[0].axvline(cutoff, color="k") - ax[0].set_title("Step 2: BoxCox") - ax[1].plot(opening_values, edge_scores, "o-") - ax[1].set_title("Mean Boundary Gradient") - ax[2].plot(selected_voxels, "o-") - ax[2].set_title("Mask Size (% FOV)") - vmax = np.percentile(values, quantile_max * 100) - display = plot_epi(b0_nii, cmap="gray", vmax=vmax, display_mode="z", cut_coords=10) - display.add_contours(mask_img, linewidths=2) - disp2 = plot_epi( - grad_img, - cmap="gray", - resampling_interpolation="nearest", - display_mode="z", - cut_coords=10, - ) - disp2.add_contours(mask_img, linewidths=0.5) - return mask_img, scaled_img, grad_img - - -def watershed_refined_b0_mask( - b0_nii, show_plot=False, pad_size=10, quantile_max=0.8, ribbon_size=5, cwd=None -): - """Refine the boundary of a mask using the watershed algorithm. 
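-
-    The initial mask from ``calculate_gradmax_b0_mask`` is eroded and dilated
-    to form a ribbon around its boundary; markers are sampled inside and
-    outside that ribbon, and a watershed of the weighted morphological
-    gradient decides where the final edge falls within it.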
- - **Returns** - - mask_nii: spatial image - binary gradient-optimizing mask - weighting_mask: spatial image - smoothed mask for use with N4 - """ - - initial_mask_nii, initial_scaled_nii, _ = calculate_gradmax_b0_mask( - b0_nii, show_plot=show_plot, quantile_max=quantile_max, cwd=cwd - ) - - if pad_size: - initial_mask_nii = run_imagemath( - initial_mask_nii, "PadImage", [str(pad_size)], copy_input_header=False, cwd=cwd - ) - initial_scaled_nii = run_imagemath( - initial_scaled_nii, "PadImage", [str(pad_size)], copy_input_header=False, cwd=cwd - ) - - mask_image = initial_mask_nii.get_fdata().astype(np.uint8) - scaled_image = initial_scaled_nii.get_fdata() - - # Find a ribbon to detect the boundary in - morph_size = ribbon_size // 2 if (ribbon_size // 2) % 2 == 1 else ribbon_size // 2 + 1 - eroded_mask = ndimage.binary_erosion(mask_image, structure=sim.cube(morph_size)) - dilated_mask = ndimage.binary_dilation(mask_image, structure=sim.cube(morph_size)) - definitely_outer = ( - ndimage.binary_dilation(dilated_mask, structure=sim.cube(morph_size)) ^ dilated_mask - ) - ribbon_mask = dilated_mask ^ eroded_mask - - # Down-weight data deep in the mask - inner_weights = ndimage.gaussian_filter(eroded_mask.astype(float), sigma=morph_size) - inner_weights = 1.0 - inner_weights / inner_weights.max() - - # Down-weight data as it gets far from the mask - maurer = run_imagemath( - new_img_like(initial_mask_nii, eroded_mask), "MaurerDistance", [], cwd=cwd - ).get_fdata() - outside_mask_distance = np.clip(maurer, 2, None) - outer_weights = 1 / outside_mask_distance - - morph_grad_weights = inner_weights * outer_weights - smoothed_weights = ndimage.gaussian_filter(morph_grad_weights, sigma=morph_size / 2) - smoothed_weights = smoothed_weights / smoothed_weights.max() - - # Calculate the morphological gradient - morph_grad = ( - ndimage.morphological_gradient(scaled_image, footprint=sim.cube(3)) * smoothed_weights - ) - - markers = select_markers_for_rw(morph_grad, eroded_mask, ribbon_mask, definitely_outer) - watershed_seg = watershed(morph_grad, markers) - ws_mask = watershed_seg == 2 - - if pad_size: - ws_mask = ws_mask[pad_size:-pad_size, pad_size:-pad_size, pad_size:-pad_size] - morph_grad = morph_grad[pad_size:-pad_size, pad_size:-pad_size, pad_size:-pad_size] - - # Ensure headers are the same as the input image - mask_img = new_img_like(b0_nii, ws_mask) - grad_img = new_img_like(b0_nii, morph_grad) - - if show_plot: - display = plot_epi(b0_nii, cmap="gray", display_mode="z", cut_coords=10) - display.add_contours(mask_img, linewidths=2) - disp2 = plot_epi( - grad_img, - cmap="gray", - resampling_interpolation="nearest", - display_mode="z", - cut_coords=10, - ) - disp2.add_contours(mask_img, linewidths=0.5) - return mask_img - - -def select_markers_for_rw(image, inner_mask, empty_mask, outer_mask, sample_proportion=0.5): - markers = np.zeros_like(image) - 1.0 - use_as_inner_marker = np.random.rand(inner_mask.sum()) < sample_proportion - use_as_outer_marker = np.random.rand(outer_mask.sum()) < sample_proportion - markers[inner_mask > 0] = use_as_inner_marker.astype(int) * 2 - markers[outer_mask > 0] = use_as_outer_marker.astype(int) * 1 - markers[empty_mask > 0] = 0 - - return markers diff --git a/qsirecon/interfaces/niworkflows.py b/qsirecon/interfaces/niworkflows.py deleted file mode 100644 index ecebb412..00000000 --- a/qsirecon/interfaces/niworkflows.py +++ /dev/null @@ -1,631 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; 
indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Image tools interfaces -~~~~~~~~~~~~~~~~~~~~~~ - - -""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -from mimetypes import guess_type - -import matplotlib.pyplot as plt -import nibabel as nb -import numpy as np -import seaborn as sns -from matplotlib import gridspec as mgs -from nipype import logging -from nipype.interfaces import ants -from nipype.interfaces.ants import Registration -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - SimpleInterface, - TraitedSpec, - traits, -) -from nipype.interfaces.mixins import reporting -from nipype.utils.filemanip import fname_presuffix -from niworkflows.interfaces.norm import ( - SpatialNormalization, - _SpatialNormalizationInputSpec, -) -from niworkflows.interfaces.reportlets.base import ( - RegistrationRC, - _SVGReportCapableInputSpec, -) -from niworkflows.interfaces.reportlets.registration import ( - _ANTSRegistrationInputSpecRPT, - _ANTSRegistrationOutputSpecRPT, -) -from seaborn import color_palette - -LOGGER = logging.getLogger("nipype.interface") - - -class ANTSRegistrationRPT(RegistrationRC, Registration): - input_spec = _ANTSRegistrationInputSpecRPT - output_spec = _ANTSRegistrationOutputSpecRPT - - def _post_run_hook(self, runtime): - self._fixed_image = self.inputs.fixed_image[0] - self._moving_image = self.aggregate_outputs(runtime=runtime).warped_image - LOGGER.info( - "Report - setting fixed (%s) and moving (%s) images", - self._fixed_image, - self._moving_image, - ) - - return super(ANTSRegistrationRPT, self)._post_run_hook(runtime) - - -class dMRIPlot(object): - """ - Generates the dMRI Summary Plot - """ - - def __init__( - self, - sliceqc_file, - mask_file, - confounds, - usecols=None, - units=None, - vlines=None, - spikes_files=None, - min_slice_size_percentile=10.0, - ): - if sliceqc_file.endswith(".npz") or sliceqc_file.endswith(".npy"): - self.qc_data = np.load(sliceqc_file) - else: - # Load the info from eddy - slice_scores = np.loadtxt(sliceqc_file, skiprows=1) - # Get the slice counts - mask_img = nb.load(mask_file) - mask = mask_img.get_fdata() > 0 - masked_slices = ( - mask * np.arange(mask_img.shape[2])[np.newaxis, np.newaxis, :] - ).astype(int) - slice_nums, slice_counts = np.unique(masked_slices[mask], return_counts=True) - self.qc_data = {"slice_scores": slice_scores, "slice_counts": slice_counts} - - self.confounds = confounds - - def plot(self, figure=None): - """Main plotter""" - sns.set_style("whitegrid") - sns.set_context("paper", font_scale=0.8) - - if figure is None: - figure = plt.gcf() - - to_plot = ["bval", "hmc_xcorr", "framewise_displacement"] - confound_names = [p for p in to_plot if p in self.confounds.columns] - nconfounds = len(confound_names) - nrows = 1 + nconfounds - - # Create grid - grid = mgs.GridSpec( - nrows, 1, wspace=0.0, hspace=0.05, height_ratios=[1] * (nrows - 1) + [5] - ) - - grid_id = 0 - palette = color_palette("husl", nconfounds) - - for i, name in enumerate(confound_names): - tseries = self.confounds[name] - confoundplot(tseries, grid[grid_id], color=palette[i], name=name) - grid_id += 1 - - plot_sliceqc( - self.qc_data["slice_scores"].T, - self.qc_data["slice_counts"], - subplot=grid[-1], - ) - return figure - - -def plot_sliceqc( - slice_data, - nperslice, - size=(950, 800), - subplot=None, - title=None, - output_file=None, - lut=None, - tr=None, -): - """ - Plot an image representation of voxel intensities across time also know - as the "carpet 
plot" or "Power plot". See Jonathan Power Neuroimage - 2017 Jul 1; 154:150-158. - - Parameters - ---------- - slice_data: 2d array - errors in each slice for each volume - nperslice: 1d array - number of voxels included in each slice - axes : matplotlib axes, optional - The axes used to display the plot. If None, the complete - figure is used. - title : string, optional - The title displayed on the figure. - output_file : string, or None, optional - The name of an image file to export the plot to. Valid extensions - are .png, .pdf, .svg. If output_file is not None, the plot - is saved to a file, and the display is closed. - tr : float , optional - Specify the TR, if specified it uses this value. If left as None, - # Frames is plotted instead of time. - """ - - # Define TR and number of frames - notr = False - if tr is None: - notr = True - tr = 1.0 - - # If subplot is not defined - if subplot is None: - subplot = mgs.GridSpec(1, 1)[0] - - # Define nested GridSpec - wratios = [1, 100] - gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=subplot, width_ratios=wratios, wspace=0.0) - - # Segmentation colorbar - ax0 = plt.subplot(gs[0]) - ax0.set_yticks([]) - ax0.set_xticks([]) - ax0.imshow(nperslice[:, np.newaxis], interpolation="nearest", aspect="auto", cmap="plasma") - ax0.grid(False) - ax0.spines["left"].set_visible(False) - ax0.spines["bottom"].set_color("none") - ax0.spines["bottom"].set_visible(False) - - # Carpet plot - ax1 = plt.subplot(gs[1]) - ax1.imshow(slice_data, interpolation="nearest", aspect="auto", cmap="viridis") - ax1.grid(False) - ax1.set_yticks([]) - ax1.set_yticklabels([]) - - # Set 10 frame markers in X axis - interval = max((int(slice_data.shape[1] + 1) // 10, int(slice_data.shape[1] + 1) // 5, 1)) - xticks = list(range(0, slice_data.shape[1])[::interval]) - ax1.set_xticks(xticks) - if notr: - ax1.set_xlabel("time (frame #)") - else: - ax1.set_xlabel("time (s)") - labels = tr * (np.array(xticks)) - ax1.set_xticklabels(["%.02f" % t for t in labels.tolist()], fontsize=5) - - # Remove and redefine spines - for side in ["top", "right"]: - # Toggle the spine objects - ax0.spines[side].set_color("none") - ax0.spines[side].set_visible(False) - ax1.spines[side].set_color("none") - ax1.spines[side].set_visible(False) - - ax1.yaxis.set_ticks_position("left") - ax1.xaxis.set_ticks_position("bottom") - ax1.spines["bottom"].set_visible(False) - ax1.spines["left"].set_color("none") - ax1.spines["left"].set_visible(False) - - if output_file is not None: - figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") - plt.close(figure) - figure = None - return output_file - - return [ax0, ax1], gs - - -def confoundplot( - tseries, - gs_ts, - gs_dist=None, - name=None, - units=None, - tr=None, - hide_x=True, - color="b", - nskip=0, - cutoff=None, - ylims=None, -): - # Define TR and number of frames - notr = False - if tr is None: - notr = True - tr = 1.0 - ntsteps = len(tseries) - tseries = np.array(tseries) - - # Define nested GridSpec - gs = mgs.GridSpecFromSubplotSpec(1, 2, subplot_spec=gs_ts, width_ratios=[1, 100], wspace=0.0) - - ax_ts = plt.subplot(gs[1]) - ax_ts.grid(False) - - # Set 10 frame markers in X axis - interval = max((ntsteps // 10, ntsteps // 5, 1)) - xticks = list(range(0, ntsteps)[::interval]) - ax_ts.set_xticks(xticks) - - if not hide_x: - if notr: - ax_ts.set_xlabel("time (frame #)") - else: - ax_ts.set_xlabel("time (s)") - labels = tr * np.array(xticks) - ax_ts.set_xticklabels(["%.02f" % t for t in labels.tolist()]) - else: - ax_ts.set_xticklabels([]) - 
- if name is not None: - if units is not None: - name += " [%s]" % units - - ax_ts.annotate( - name, - xy=(0.0, 0.7), - xytext=(0, 0), - xycoords="axes fraction", - textcoords="offset points", - va="center", - ha="left", - color=color, - size=8, - bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, - }, - ) - - for side in ["top", "right"]: - ax_ts.spines[side].set_color("none") - ax_ts.spines[side].set_visible(False) - - if not hide_x: - ax_ts.spines["bottom"].set_position(("outward", 20)) - ax_ts.xaxis.set_ticks_position("bottom") - else: - ax_ts.spines["bottom"].set_color("none") - ax_ts.spines["bottom"].set_visible(False) - - # ax_ts.spines["left"].set_position(('outward', 30)) - ax_ts.spines["left"].set_color("none") - ax_ts.spines["left"].set_visible(False) - # ax_ts.yaxis.set_ticks_position('left') - - ax_ts.set_yticks([]) - ax_ts.set_yticklabels([]) - - nonnan = tseries[~np.isnan(tseries)] - if nonnan.size > 0: - # Calculate Y limits - def_ylims = [nonnan.min() - 0.1 * abs(nonnan.min()), 1.1 * nonnan.max()] - if ylims is not None: - if ylims[0] is not None: - def_ylims[0] = min([def_ylims[0], ylims[0]]) - if ylims[1] is not None: - def_ylims[1] = max([def_ylims[1], ylims[1]]) - - # Add space for plot title and mean/SD annotation - def_ylims[0] -= 0.1 * (def_ylims[1] - def_ylims[0]) - - ax_ts.set_ylim(def_ylims) - - # Annotate stats - maxv = nonnan.max() - mean = nonnan.mean() - stdv = nonnan.std() - p95 = np.percentile(nonnan, 95.0) - else: - maxv = 0 - mean = 0 - stdv = 0 - p95 = 0 - - stats_label = ( - r"max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} " - r"$\bullet$ $\sigma$: {sigma:.3f}" - ).format(max=maxv, mean=mean, units=units or "", sigma=stdv) - ax_ts.annotate( - stats_label, - xy=(0.98, 0.7), - xycoords="axes fraction", - xytext=(0, 0), - textcoords="offset points", - va="center", - ha="right", - color=color, - size=4, - bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, - }, - ) - - # Annotate percentile 95 - ax_ts.plot((0, ntsteps - 1), [p95] * 2, linewidth=0.1, color="lightgray") - ax_ts.annotate( - "%.2f" % p95, - xy=(0, p95), - xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - color="lightgray", - size=3, - ) - - if cutoff is None: - cutoff = [] - - for i, thr in enumerate(cutoff): - ax_ts.plot((0, ntsteps - 1), [thr] * 2, linewidth=0.2, color="dimgray") - - ax_ts.annotate( - "%.2f" % thr, - xy=(0, thr), - xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - color="dimgray", - size=3, - ) - - ax_ts.plot(tseries, color=color, linewidth=0.8) - ax_ts.set_xlim((0, ntsteps - 1)) - - if gs_dist is not None: - ax_dist = plt.subplot(gs_dist) - sns.distplot(tseries, vertical=True, ax=ax_dist) - ax_dist.set_xlabel("Timesteps") - ax_dist.set_ylim(ax_ts.get_ylim()) - ax_dist.set_yticklabels([]) - - return [ax_ts, ax_dist], gs - return ax_ts, gs - - -class RobustMNINormalizationInputSpecRPT( - _SVGReportCapableInputSpec, - _SpatialNormalizationInputSpec, -): - # Template orientation. 
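-    # Defaults to LPS and should match the orientation of the input image.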
- orientation = traits.Enum( - "LPS", - mandatory=True, - usedefault=True, - desc="modify template orientation (should match input image)", - ) - - -class RobustMNINormalizationOutputSpecRPT( - reporting.ReportCapableOutputSpec, - ants.registration.RegistrationOutputSpec, -): - # Try to work around TraitError of "undefined 'reference_image' attribute" - reference_image = traits.File(desc="the output reference image") - - -class RobustMNINormalizationRPT(RegistrationRC, SpatialNormalization): - input_spec = RobustMNINormalizationInputSpecRPT - output_spec = RobustMNINormalizationOutputSpecRPT - - def _post_run_hook(self, runtime): - # We need to dig into the internal ants.Registration interface - self._fixed_image = self._get_ants_args()["fixed_image"] - if isinstance(self._fixed_image, (list, tuple)): - self._fixed_image = self._fixed_image[0] # get first item if list - - if self._get_ants_args().get("fixed_image_mask") is not None: - self._fixed_image_mask = self._get_ants_args().get("fixed_image_mask") - self._moving_image = self.aggregate_outputs(runtime=runtime).warped_image - LOGGER.info( - "Report - setting fixed (%s) and moving (%s) images", - self._fixed_image, - self._moving_image, - ) - - return super(RobustMNINormalizationRPT, self)._post_run_hook(runtime) - - -class FUGUEvsm2ANTSwarpInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input displacements field map") - pe_dir = traits.Enum("i", "i-", "j", "j-", "k", "k-", desc="phase-encoding axis") - - -class FUGUEvsm2ANTSwarpOutputSpec(TraitedSpec): - out_file = File(desc="the output warp field") - - -class FUGUEvsm2ANTSwarp(SimpleInterface): - """Convert a voxel-shift-map to ants warp.""" - - input_spec = FUGUEvsm2ANTSwarpInputSpec - output_spec = FUGUEvsm2ANTSwarpOutputSpec - - def _run_interface(self, runtime): - nii = nb.load(self.inputs.in_file) - - phaseEncDim = {"i": 0, "j": 1, "k": 2}[self.inputs.pe_dir[0]] - - if len(self.inputs.pe_dir) == 2: - phaseEncSign = 1.0 - else: - phaseEncSign = -1.0 - - # Fix header - hdr = nii.header.copy() - hdr.set_data_dtype(np.dtype(" b0_threshold) - - data_b0s = np.squeeze(np.take(data, b0_idx, axis=3)) - data_dwi = np.squeeze(np.take(data, dwi_idx, axis=3)) - - # create empty arrays - denoised_b0s = np.empty((data_b0s.shape), dtype=calc_dtype) - denoised_dwi = np.empty((data_dwi.shape), dtype=calc_dtype) - - denoised_arr = np.empty((data.shape), dtype=calc_dtype) - - if verbose: - t1 = time.time() - - # if only 1 b0 volume, skip denoising it - if data_b0s.ndim == 3 or not b0_denoising: - if verbose: - print("b0 denoising skipped...") - denoised_b0s = data_b0s - - else: - train_b0 = _extract_3d_patches( - np.pad( - data_b0s, - ( - (patch_radius[0], patch_radius[0]), - (patch_radius[1], patch_radius[1]), - (patch_radius[2], patch_radius[2]), - (0, 0), - ), - mode="constant", - ), - patch_radius=patch_radius, - ) - - for vol_idx in range(0, data_b0s.shape[3]): - denoised_b0s[..., vol_idx] = _vol_denoise( - train_b0, vol_idx, model, data_b0s.shape, alpha=alpha - ) - - if verbose: - print("Denoised b0 Volume: ", vol_idx) - - # Separate denoising for DWI volumes - train_dwi = _extract_3d_patches( - np.pad( - data_dwi, - ( - (patch_radius[0], patch_radius[0]), - (patch_radius[1], patch_radius[1]), - (patch_radius[2], patch_radius[2]), - (0, 0), - ), - mode="constant", - ), - patch_radius=patch_radius, - ) - - # Insert the separately denoised arrays into the respective empty arrays - for vol_idx in range(0, data_dwi.shape[3]): - denoised_dwi[..., vol_idx] = 
_vol_denoise( - train_dwi, vol_idx, model, data_dwi.shape, alpha=alpha - ) - - if verbose: - print("Denoised DWI Volume: ", vol_idx) - - if verbose: - t2 = time.time() - print("Total time taken for Patch2Self: ", t2 - t1, " seconds") - - if data_b0s.ndim == 3: - denoised_arr[:, :, :, b0_idx[0][0]] = denoised_b0s - else: - for i, idx in enumerate(b0_idx): - denoised_arr[:, :, :, idx[0]] = np.squeeze(denoised_b0s[..., i]) - - for i, idx in enumerate(dwi_idx): - denoised_arr[:, :, :, idx[0]] = np.squeeze(denoised_dwi[..., i]) - - # shift intensities per volume to handle for negative intensities - if shift_intensity and not clip_negative_vals: - for i in range(0, denoised_arr.shape[3]): - shift = np.min(data[..., i]) - np.min(denoised_arr[..., i]) - denoised_arr[..., i] = denoised_arr[..., i] + shift - - # clip out the negative values from the denoised output - elif clip_negative_vals and not shift_intensity: - denoised_arr.clip(min=0, out=denoised_arr) - - elif clip_negative_vals and shift_intensity: - config.loggers.interface.warning( - "Both `clip_negative_vals` and `shift_intensity` cannot be True." - ) - config.loggers.interface.warning("Defaulting to `clip_negative_bvals`...") - denoised_arr.clip(min=0, out=denoised_arr) - - # Calculate a "noise level" image - noise_level_image = np.sqrt(np.mean((data - denoised_arr) ** 2, axis=3)) - - return np.array(denoised_arr, dtype=out_dtype), noise_level_image diff --git a/qsirecon/interfaces/qc.py b/qsirecon/interfaces/qc.py index 5da6142c..99e90a9b 100644 --- a/qsirecon/interfaces/qc.py +++ b/qsirecon/interfaces/qc.py @@ -1,12 +1,10 @@ import base64 -import os.path as op from io import BytesIO import matplotlib.pyplot as plt import nibabel as nib import numpy as np from dipy.segment.mask import median_otsu -from nipype.utils.filemanip import load_json, save_json def reorient_array(data, aff): @@ -260,35 +258,3 @@ def createB0_ColorFA_Mask_Sprites(b0_file, colorFA_file, mask_file): outmask["img"] = img return outb0, outcolorFA, outmask - - -def create_report_json( - dwi_corrected_file, - eddy_rms, - eddy_report, - color_fa_file, - anat_mask_file, - outlier_indices, - eddy_qc_file, - outpath=op.abspath("./report.json"), -): - - report = {} - report["dwi_corrected"] = createSprite4D(dwi_corrected_file) - - b0, colorFA, mask = createB0_ColorFA_Mask_Sprites( - dwi_corrected_file, color_fa_file, anat_mask_file - ) - report["b0"] = b0 - report["colorFA"] = colorFA - report["anat_mask"] = mask - report["outlier_volumes"] = outlier_indices.tolist() - - with open(eddy_report, "r") as f: - report["eddy_report"] = f.readlines() - - report["eddy_params"] = np.genfromtxt(eddy_rms).tolist() - eddy_qc = load_json(eddy_qc_file) - report["eddy_quad"] = eddy_qc - save_json(outpath, report) - return outpath diff --git a/qsirecon/interfaces/reports.py b/qsirecon/interfaces/reports.py index e69dc27b..ff982f33 100644 --- a/qsirecon/interfaces/reports.py +++ b/qsirecon/interfaces/reports.py @@ -10,31 +10,18 @@ """ import json -import os import os.path as op -import re -import time -from collections import defaultdict import matplotlib import matplotlib.pyplot as plt -import nibabel as nb import numpy as np import pandas as pd -import seaborn as sns -from matplotlib import animation -from nilearn.maskers import NiftiLabelsMasker -from nipype.interfaces import freesurfer as fs from nipype.interfaces.base import ( BaseInterfaceInputSpec, CommandLine, CommandLineInputSpec, - Directory, File, - InputMultiObject, - InputMultiPath, SimpleInterface, - Str, TraitedSpec, 
isdefined, traits, @@ -42,55 +29,8 @@ from nipype.interfaces.mixins import reporting from scipy.io.matlab import loadmat -from .bids import get_bids_params -from .gradients import concatenate_bvals, concatenate_bvecs from .qc import createB0_ColorFA_Mask_Sprites, createSprite4D -SUBJECT_TEMPLATE = """\t
<ul class="elem-desc">
-\t\t<li>Subject ID: {subject_id}</li>
-\t\t<li>Structural images: {n_t1s:d} T1-weighted {t2w}</li>
-\t\t<li>Diffusion-weighted series: inputs {n_dwis:d}, outputs {n_outputs:d}</li>
-{groupings}
-\t\t<li>Resampling targets: T1wACPC
-\t\t<li>FreeSurfer reconstruction: {freesurfer_status}</li>
-\t</ul>
-"""
-
-DIFFUSION_TEMPLATE = """\t\t<h3 class="elem-title">Summary</h3>
-\t\t<ul class="elem-desc">
-\t\t\t<li>Phase-encoding (PE) direction: {pedir}</li>
-\t\t\t<li>Susceptibility distortion correction: {sdc}</li>
-\t\t\t<li>Coregistration Transform: {coregistration}</li>
-\t\t\t<li>Denoising Method: {denoise_method}</li>
-\t\t\t<li>Denoising Window: {denoise_window}</li>
-\t\t\t<li>HMC Transform: {hmc_transform}</li>
-\t\t\t<li>HMC Model: {hmc_model}</li>
-\t\t\t<li>DWI series resampled to spaces: T1wACPC</li>
-\t\t\t<li>Confounds collected: {confounds}</li>
-\t\t\t<li>Impute slice threshold: {impute_slice_threshold}</li>
-\t\t</ul>
-{validation_reports}
-"""
-
-ABOUT_TEMPLATE = """\t<ul class="elem-desc">
-\t\t<li>qsirecon version: {version}</li>
-\t\t<li>qsirecon command: <code>{command}</code></li>
-\t\t<li>Date preprocessed: {date}</li>
-\t</ul>
-"""
-
-TOPUP_TEMPLATE = """\
-\t\t<h3 class="elem-title">
-\t\t{summary}</h3>
-"""
-
-GROUPING_TEMPLATE = """\t<ul class="elem-desc">
-\t\t<li>Output Name: {output_name}</li>
-{input_files}
-</ul>
-"""
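-
-# Illustrative only (hypothetical values, not from any real dataset): the
-# summary interfaces below fill these templates with str.format, e.g.
-#
-#     GROUPING_TEMPLATE.format(
-#         output_name="sub-01_acq-example",
-#         input_files="\t\t<li>sub-01_dwi.nii.gz</li>",
-#     )
-#
-# which yields one <ul> block per scan group in the subject summary report.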
    -""" - INTERACTIVE_TEMPLATE = """ @@ -121,575 +61,6 @@ class SummaryOutputSpec(TraitedSpec): out_report = File(exists=True, desc="HTML segment containing summary") -class SummaryInterface(SimpleInterface): - output_spec = SummaryOutputSpec - - def _generate_segment(self): - raise NotImplementedError() - - def _run_interface(self, runtime): - segment = self._generate_segment() - fname = os.path.join(runtime.cwd, "report.html") - with open(fname, "w") as fobj: - fobj.write(segment) - self._results["out_report"] = fname - return runtime - - -class SubjectSummaryInputSpec(BaseInterfaceInputSpec): - t1w = InputMultiPath(File(exists=True), desc="T1w structural images") - t2w = InputMultiPath(File(exists=True), desc="T2w structural images") - subjects_dir = Directory(desc="FreeSurfer subjects directory") - subject_id = Str(desc="Subject ID") - dwi_groupings = traits.Dict(desc="groupings of DWI files and their output names") - output_spaces = traits.List(desc="Target spaces") - template = traits.Enum("MNI152NLin2009cAsym", desc="Template space") - - -class SubjectSummaryOutputSpec(SummaryOutputSpec): - # This exists to ensure that the summary is run prior to the first ReconAll - # call, allowing a determination whether there is a pre-existing directory - subject_id = Str(desc="FreeSurfer subject ID") - - -class SubjectSummary(SummaryInterface): - input_spec = SubjectSummaryInputSpec - output_spec = SubjectSummaryOutputSpec - - def _run_interface(self, runtime): - if isdefined(self.inputs.subject_id): - self._results["subject_id"] = self.inputs.subject_id - return super(SubjectSummary, self)._run_interface(runtime) - - def _generate_segment(self): - if not isdefined(self.inputs.subjects_dir): - freesurfer_status = "Not run" - else: - recon = fs.ReconAll( - subjects_dir=self.inputs.subjects_dir, - subject_id=self.inputs.subject_id, - T1_files=self.inputs.t1w, - flags="-noskullstrip", - ) - if recon.cmdline.startswith("echo"): - freesurfer_status = "Pre-existing directory" - else: - freesurfer_status = "Run by qsirecon" - - t2w_seg = "" - if self.inputs.t2w: - t2w_seg = "(+ {:d} T2-weighted)".format(len(self.inputs.t2w)) - - # Add text for how the dwis are grouped - n_dwis = 0 - n_outputs = 0 - groupings = "" - if isdefined(self.inputs.dwi_groupings): - for output_fname, group_info in self.inputs.dwi_groupings.items(): - n_outputs += 1 - files_desc = [] - files_desc.append( - "\t\t\t
<li>Scan group: %s (PE Dir %s)</li><ul>"
-                    % (output_fname, group_info["dwi_series_pedir"])
-                )
-                files_desc.append("\t\t\t\t<li>DWI Files:</li>")
-                for dwi_file in group_info["dwi_series"]:
-                    files_desc.append("\t\t\t\t\t<li> %s </li>" % dwi_file)
-                    n_dwis += 1
-                fieldmap_type = group_info["fieldmap_info"]["suffix"]
-                if fieldmap_type is not None:
-                    files_desc.append("\t\t\t\t<li>Fieldmap type: %s </li>" % fieldmap_type)
-
-                    for key, value in group_info["fieldmap_info"].items():
-                        files_desc.append("\t\t\t\t\t<li> %s: %s </li>" % (key, str(value)))
-                        n_dwis += 1
-                files_desc.append("</ul>
    ") - groupings += GROUPING_TEMPLATE.format( - output_name=output_fname, input_files="\n".join(files_desc) - ) - - return SUBJECT_TEMPLATE.format( - subject_id=self.inputs.subject_id, - n_t1s=len(self.inputs.t1w), - t2w=t2w_seg, - n_dwis=n_dwis, - n_outputs=n_outputs, - groupings=groupings, - output_spaces="T1wACPC", - freesurfer_status=freesurfer_status, - ) - - -class DiffusionSummaryInputSpec(BaseInterfaceInputSpec): - distortion_correction = traits.Str( - desc="Susceptibility distortion correction method", mandatory=True - ) - pe_direction = traits.Enum( - None, "i", "i-", "j", "j-", mandatory=True, desc="Phase-encoding direction detected" - ) - distortion_correction = traits.Str(mandatory=True, desc="Method used for SDC") - impute_slice_threshold = traits.CFloat(desc="threshold for imputing a slice") - hmc_transform = traits.Str(mandatory=True, desc="transform used during HMC") - hmc_model = traits.Str(desc="model used for hmc") - b0_to_t1w_transform = traits.Enum("Rigid", "Affine", desc="Transform type for coregistration") - denoise_method = traits.Str(desc="method used for image denoising") - dwi_denoise_window = traits.Either( - traits.Int(), traits.Str(), desc="window size for dwidenoise" - ) - output_spaces = traits.List(desc="Target spaces") - confounds_file = File(exists=True, desc="Confounds file") - validation_reports = InputMultiObject(File(exists=True)) - - -class DiffusionSummary(SummaryInterface): - input_spec = DiffusionSummaryInputSpec - - def _generate_segment(self): - if self.inputs.pe_direction is None: - pedir = "MISSING - Assuming Anterior-Posterior" - else: - pedir = {"i": "Left-Right", "j": "Anterior-Posterior"}[self.inputs.pe_direction[0]] - - if isdefined(self.inputs.confounds_file): - with open(self.inputs.confounds_file) as cfh: - conflist = cfh.readline().strip("\n").strip() - else: - conflist = "" - - validation_summaries = [] - for summary in self.inputs.validation_reports: - with open(summary, "r") as summary_f: - validation_summaries.extend(summary_f.readlines()) - validation_summary = "\n".join(validation_summaries) - - return DIFFUSION_TEMPLATE.format( - pedir=pedir, - sdc=self.inputs.distortion_correction, - coregistration=self.inputs.b0_to_t1w_transform, - hmc_transform=self.inputs.hmc_transform, - hmc_model=self.inputs.hmc_model, - denoise_method=self.inputs.denoise_method, - denoise_window=self.inputs.dwi_denoise_window, - output_spaces="T1wACPC", - confounds=re.sub(r"[\t ]+", ", ", conflist), - impute_slice_threshold=self.inputs.impute_slice_threshold, - validation_reports=validation_summary, - ) - - -class AboutSummaryInputSpec(BaseInterfaceInputSpec): - version = Str(desc="qsirecon version") - command = Str(desc="qsirecon command") - # Date not included - update timestamp only if version or command changes - - -class AboutSummary(SummaryInterface): - input_spec = AboutSummaryInputSpec - - def _generate_segment(self): - return ABOUT_TEMPLATE.format( - version=self.inputs.version, - command=self.inputs.command, - date=time.strftime("%Y-%m-%d %H:%M:%S %z"), - ) - - -class TopupSummaryInputSpec(BaseInterfaceInputSpec): - summary = Str(desc="Summary of TOPUP inputs") - - -class TopupSummary(SummaryInterface): - input_spec = TopupSummaryInputSpec - - def _generate_segment(self): - return TOPUP_TEMPLATE.format(summary=self.inputs.summary) - - -class GradientPlotInputSpec(BaseInterfaceInputSpec): - orig_bvec_files = InputMultiObject( - File(exists=True), mandatory=True, desc="bvecs from DWISplit" - ) - orig_bval_files = InputMultiObject( - 
File(exists=True), mandatory=True, desc="bvals from DWISplit" - ) - source_files = traits.List(desc="source file for each gradient") - final_bvec_file = File(exists=True, desc="bval file") - - -class GradientPlotOutputSpec(SummaryOutputSpec): - plot_file = File(exists=True) - - -class GradientPlot(SummaryInterface): - input_spec = GradientPlotInputSpec - output_spec = GradientPlotOutputSpec - - def _run_interface(self, runtime): - outfile = os.path.join(runtime.cwd, "bvec_plot.gif") - sns.set_style("whitegrid") - sns.set_context("paper", font_scale=0.8) - - orig_bvecs = concatenate_bvecs(self.inputs.orig_bvec_files) - bvals = concatenate_bvals(self.inputs.orig_bval_files, None) - if isdefined(self.inputs.source_files): - file_array = np.array(self.inputs.source_files) - _, filenums = np.unique(file_array, return_inverse=True) - else: - filenums = np.ones_like(bvals) - - # Account for the possibility that this is a PE Pair average - if len(filenums) == len(bvals) * 2: - filenums = filenums[: len(bvals)] - - # Plot the final bvecs if provided - final_bvecs = None - if isdefined(self.inputs.final_bvec_file): - final_bvecs = np.loadtxt(self.inputs.final_bvec_file).T - - plot_gradients(bvals, orig_bvecs, filenums, outfile, final_bvecs) - self._results["plot_file"] = outfile - return runtime - - -def plot_gradients(bvals, orig_bvecs, source_filenums, output_fname, final_bvecs=None, frames=60): - qrads = np.sqrt(bvals) - qvecs = qrads[:, np.newaxis] * orig_bvecs - qx, qy, qz = qvecs.T - maxvals = qvecs.max(0) - minvals = qvecs.min(0) - total_max = max(np.abs(maxvals).max(), np.abs(minvals).max()) - - def force_scaling(ax): - # trick to force equal aspect on all 3 axes - for direction in (-1, 1): - for point in np.diag(direction * total_max * np.array([1, 1, 1])): - ax.plot([point[0]], [point[1]], [point[2]], "w") - - def add_lines(ax): - labels = ["L", "P", "S"] - for axnum in range(3): - minvec = np.zeros(3) - maxvec = np.zeros(3) - minvec[axnum] = minvals[axnum] - maxvec[axnum] = maxvals[axnum] - x, y, z = np.column_stack([minvec, maxvec]) - ax.plot(x, y, z, color="k") - txt_pos = maxvec + 5 - ax.text(txt_pos[0], txt_pos[1], txt_pos[2], labels[axnum], size=8, zorder=1, color="k") - - if final_bvecs is not None: - if final_bvecs.shape[0] == 3: - final_bvecs = final_bvecs.T - fqx, fqy, fqz = (qrads[:, np.newaxis] * final_bvecs).T - fig, axes = plt.subplots( - nrows=1, ncols=2, figsize=(10, 5), subplot_kw={"projection": "3d"} - ) - orig_ax = axes[0] - final_ax = axes[1] - axes_list = [orig_ax, final_ax] - final_ax.scatter(fqx, fqy, fqz, c=source_filenums, marker="+") - orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+") - final_ax.axis("off") - add_lines(final_ax) - final_ax.set_title("After Preprocessing") - else: - fig, orig_ax = plt.subplots( - nrows=1, ncols=1, figsize=(10, 5), subplot_kw={"aspect": "equal", "projection": "3d"} - ) - axes_list = [orig_ax] - orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+") - orig_ax.axis("off") - orig_ax.set_title("Original Scheme") - add_lines(orig_ax) - force_scaling(orig_ax) - # Animate rotating the axes - rotate_amount = np.ones(frames) * 180 / frames - stay_put = np.zeros_like(rotate_amount) - rotate_azim = np.concatenate([rotate_amount, stay_put, -rotate_amount, stay_put]) - rotate_elev = np.concatenate([stay_put, rotate_amount, stay_put, -rotate_amount]) - plt.tight_layout() - - def rotate(i): - for ax in axes_list: - ax.azim += rotate_azim[i] - ax.elev += rotate_elev[i] - return tuple(axes_list) - - anim = animation.FuncAnimation(fig, 
rotate, frames=frames * 4, interval=20, blit=False) - anim.save(output_fname, writer="imagemagick", fps=32) - - plt.close(fig) - fig = None - - -def topup_selection_to_report( - selected_indices, original_files, spec_lookup, image_source="combined DWI series" -): - """Write a description of how the images were selected for TOPUP. - - >>> selected_indices = [0, 15, 30, 45] - >>> original_files = ["sub-1_dir-AP_dwi.nii.gz"] * 30 + ["sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> spec_lookup = {"sub-1_dir-AP_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 2 distortion groups was included in the combined dwi data. Distortion \ -group '0 1 0 0.087' was represented by images 0, 15 from sub-1_dir-AP_dwi.nii.gz. \ -Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz. " - - Or - - >>> selected_indices = [0, 15, 30, 45] - >>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 2 distortion groups was included in the combined dwi data. Distortion \ -group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz and \ -image 0 from sub-1_dir-AP_run-2_dwi.nii.gz. Distortion group '0 -1 0 0.087' was represented \ -by images 0, 15 from sub-1_dir-PA_dwi.nii.gz. - - >>> selected_indices = [0, 15, 30, 45, 60] - >>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-AP_run-3_dwi.nii.gz"] * 15 + [ - ... "sub-1_dir-PA_dwi.nii.gz"] * 30 - >>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-AP_run-3_dwi.nii.gz": "0 1 0 0.087", - ... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 2 distortion groups was included in the combined dwi data. Distortion \ -group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz, \ -image 0 from sub-1_dir-AP_run-2_dwi.nii.gz and image 0 from sub-1_dir-AP_run-3_dwi.nii.gz. \ -Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz. - - >>> selected_indices = [0, 15, 30, 45] - >>> original_files = ["sub-1_dir-PA_dwi.nii.gz"] * 60 - >>> spec_lookup = {"sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"} - >>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup)) - A total of 1 distortion group was included in the combined dwi data. \ -Distortion group '0 -1 0 0.087' was represented by images 0, 15, 30, 45 \ -from sub-1_dir-PA_dwi.nii.gz. 
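-
-    Internally, each selected volume index is mapped back to its position
-    within its source image, the images are grouped by their distortion
-    ("warp group") spec, and the groups are joined into the sentences shown
-    above.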
- - """ - image_indices = defaultdict(list) - for imgnum, image in enumerate(original_files): - image_indices[image].append(imgnum) - - # Collect the original volume number within each source image - selected_per_image = defaultdict(list) - for b0_index in selected_indices: - b0_image = original_files[b0_index] - first_index = min(image_indices[b0_image]) - within_image_index = b0_index - first_index - selected_per_image[b0_image].append(within_image_index) - - # Collect the images and indices within each warp group - selected_per_warp_group = defaultdict(list) - for original_image, selection in selected_per_image.items(): - warp_group = spec_lookup[original_image] - selected_per_warp_group[warp_group].append((original_image, selection)) - - # Make the description - num_groups = len(selected_per_warp_group) - plural = "s" if num_groups > 1 else "" - plural2 = "were" if plural == "s" else "was" - desc = [ - "A total of {num_groups} distortion group{plural} {plural2} included in the " - "{image_source} data. ".format( - num_groups=num_groups, plural=plural, plural2=plural2, image_source=image_source - ) - ] - for distortion_group, image_list in selected_per_warp_group.items(): - group_desc = [ - "Distortion group '{spec}' was represented by ".format(spec=distortion_group) - ] - for image_name, image_indices in image_list: - formatted_indices = ", ".join(map(str, image_indices)) - plural = "s" if len(image_indices) > 1 else "" - group_desc += [ - "image{plural} {imgnums} from {img_name}".format( - plural=plural, imgnums=formatted_indices, img_name=op.split(image_name)[-1] - ), - ", ", - ] - group_desc[-1] = ". " - if len(image_list) > 1: - group_desc[-3] = " and " - desc += group_desc - - return "".join(desc) - - -class _SeriesQCInputSpec(BaseInterfaceInputSpec): - pre_qc = File(exists=True, desc="qc file from the raw data", mandatory=True) - t1_qc = File(exists=True, desc="qc file from preprocessed image in t1 space") - t1_qc_postproc = File(exists=True, desc="qc file from preprocessed image in template space") - confounds_file = File(exists=True, desc="confounds file", mandatory=True) - t1_mask_file = File(exists=True, desc="brain mask in t1 space") - t1_cnr_file = File(exists=True, desc="CNR file in t1 space") - t1_b0_series = File(exists=True, desc="time series of b=0 images") - t1_dice_score = traits.Float() - mni_dice_score = traits.Float() - output_file_name = traits.File() - - -class _SeriesQCOutputSpec(TraitedSpec): - series_qc_file = File(exists=True) - - -class SeriesQC(SimpleInterface): - input_spec = _SeriesQCInputSpec - output_spec = _SeriesQCOutputSpec - - def _run_interface(self, runtime): - image_qc = _load_qc_file(self.inputs.pre_qc, prefix="raw_") - if isdefined(self.inputs.t1_qc): - image_qc.update(_load_qc_file(self.inputs.t1_qc, prefix="t1_")) - if isdefined(self.inputs.t1_qc_postproc): - image_qc.update(_load_qc_file(self.inputs.t1_qc_postproc, prefix="t1post_")) - motion_summary = calculate_motion_summary(self.inputs.confounds_file) - image_qc.update(motion_summary) - - # Add in Dice scores if available - if isdefined(self.inputs.t1_dice_score): - image_qc["t1_dice_distance"] = [self.inputs.t1_dice_score] - if isdefined(self.inputs.mni_dice_score): - image_qc["mni_dice_distance"] = [self.inputs.mni_dice_score] - - if isdefined(self.inputs.t1_mask_file): - if isdefined(self.inputs.t1_cnr_file): - image_qc.update(get_cnr_values(self.inputs.t1_cnr_file, self.inputs.t1_mask_file)) - if isdefined(self.inputs.t1_b0_series): - # Add a function to get b=0 TSNR - pass - - # Get 
the metadata - output_file = self.inputs.output_file_name - image_qc["file_name"] = output_file - bids_info = get_bids_params(output_file) - image_qc.update(bids_info) - output = op.join(runtime.cwd, "dwi_qc.csv") - pd.DataFrame(image_qc).to_csv(output, index=False) - self._results["series_qc_file"] = output - return runtime - - -def _load_qc_file(fname, prefix=""): - qc_data = pd.read_csv(fname).to_dict(orient="records")[0] - renamed = dict([(prefix + key, value) for key, value in qc_data.items()]) - return renamed - - -def get_cnr_values(cnr_image, brain_mask): - cnr_img = nb.load(cnr_image) - mask_img = nb.load(brain_mask) - - # Determine which CNRs we's getting - num_cnrs = 1 if cnr_img.ndim == 3 else cnr_img.shape[3] - if num_cnrs == 1: - cnr_labels = ["CNR"] - else: - cnr_labels = ["CNR%d" % value for value in range(num_cnrs)] - - cnrs = {} - strategies = ["mean", "median", "standard_deviation"] - for strategy in strategies: - masker = NiftiLabelsMasker(mask_img, strategy=strategy, resampling_target="data") - cnr_values = masker.fit_transform(cnr_img).flatten() - for cnr_name, cnr_value in zip(cnr_labels, cnr_values): - cnrs[cnr_name + "_" + strategy] = cnr_value - - return cnrs - - -def motion_derivatives(translations, rotations, framewise_disp, original_files): - - def padded_diff(data): - out = np.zeros_like(data) - out[1:] = np.diff(data, axis=0) - return out - - drotations = padded_diff(rotations) - dtranslations = padded_diff(translations) - - # We don't want the relative values across the boundaries of runs. - # Determine which values should be ignored - file_labels, _ = pd.factorize(original_files) - new_files = padded_diff(file_labels) - - def file_masked(data): - masked_data = data.copy() - masked_data[new_files > 0] = 0 - return masked_data - - framewise_disp = file_masked(framewise_disp) - return { - "mean_fd": [framewise_disp.mean()], - "max_fd": [framewise_disp.max()], - "max_rotation": [file_masked(np.abs(rotations)).max()], - "max_translation": [file_masked(np.abs(translations)).max()], - "max_rel_rotation": [file_masked(np.abs(drotations)).max()], - "max_rel_translation": [file_masked(np.abs(dtranslations)).max()], - } - - -def calculate_motion_summary(confounds_tsv): - if not isdefined(confounds_tsv) or confounds_tsv is None: - return { - "mean_fd": [np.nan], - "max_fd": [np.nan], - "max_rotation": [np.nan], - "max_translation": [np.nan], - "max_rel_rotation": [np.nan], - "max_rel_translation": [np.nan], - } - df = pd.read_csv(confounds_tsv, delimiter="\t") - - # the default case where each output image comes from one input image - if "trans_x" in df.columns: - translations = df[["trans_x", "trans_y", "trans_z"]].values - rotations = df[["rot_x", "rot_y", "rot_z"]].values - return motion_derivatives( - translations, rotations, df["framewise_displacement"], df["original_file"] - ) - - # If there was a PE Pair averaging, get motion from both - motion1 = motion_derivatives( - df[["trans_x_1", "trans_y_1", "trans_z_1"]].values, - df[["rot_x_1", "rot_y_1", "rot_z_1"]].values, - df["framewise_displacement_1"], - df["original_file_1"], - ) - - motion2 = motion_derivatives( - df[["trans_x_2", "trans_y_2", "trans_z_2"]].values, - df[["rot_x_2", "rot_y_2", "rot_z_2"]].values, - df["framewise_displacement_2"], - df["original_file_2"], - ) - - # Combine the FDs from both PE directions - # both_fd = np.column_stack([m1, m2]) - # framewise_disp = both_fd[np.nanargmax(np.abs(both_fd), axis=1)] - def compare_series(key_name, comparator): - m1 = motion1[key_name][0] - m2 = 
motion2[key_name][0] - return [comparator(m1, m2)] - - return { - "mean_fd": compare_series("mean_fd", lambda a, b: (a + b) / 2), - "max_fd": compare_series("max_fd", max), - "max_rotation": compare_series("max_rotation", max), - "max_translation": compare_series("max_translation", max), - "max_rel_rotation": compare_series("max_rel_rotation", max), - "max_rel_translation": compare_series("max_rel_translation", max), - } - - class _InteractiveReportInputSpec(TraitedSpec): raw_dwi_file = File(exists=True, mandatory=True) processed_dwi_file = File(exists=True, mandatory=True) diff --git a/qsirecon/interfaces/shoreline.py b/qsirecon/interfaces/shoreline.py deleted file mode 100644 index e75682a0..00000000 --- a/qsirecon/interfaces/shoreline.py +++ /dev/null @@ -1,521 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -SHORELine interfaces -~~~~~~~~~~~~~~~~~~~~~~ - - -""" -import os -import os.path as op - -import dipy.reconst.dti as dti -import imageio -import matplotlib.pyplot as plt -import nibabel as nb -import numpy as np -import pandas as pd -import seaborn as sns -from dipy.core.gradients import gradient_table -from nipype import logging -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - InputMultiObject, - OutputMultiObject, - SimpleInterface, - TraitedSpec, - isdefined, - traits, -) -from skimage import measure - -from ..utils.brainsuite_shore import BrainSuiteShoreModel, brainsuite_shore_basis -from .gradients import concatenate_bvals, concatenate_bvecs -from .reports import SummaryInterface, SummaryOutputSpec - -LOGGER = logging.getLogger("nipype.interface") - - -def _nonoverlapping_qspace_samples(prediction_bval, prediction_bvec, all_bvals, all_bvecs, cutoff): - """Ensure that none of the training samples are too close to the sample to predict. 
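-
-    Distances are computed in scaled q-space: directions are weighted by
-    sqrt(b - min(b)) and rescaled to percent of the maximum q value; a sample
-    is kept only if both it and its antipode lie farther than ``cutoff`` from
-    the vector being predicted.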
- - Parameters - """ - min_bval = min(all_bvals.min(), prediction_bval) - all_qvals = np.sqrt(all_bvals - min_bval) - prediction_qval = np.sqrt(prediction_bval - min_bval) - - # Convert q values to percent of maximum qval - max_qval = max(all_qvals.max(), prediction_qval) - all_qvals_scaled = all_qvals / max_qval * 100 - prediction_qval_scaled = prediction_qval / max_qval * 100 - scaled_qvecs = all_bvecs * all_qvals_scaled[:, np.newaxis] - scaled_prediction_qvec = prediction_bvec * prediction_qval_scaled - - # Calculate the distance between the sampled qvecs and the prediction qvec - distances = np.linalg.norm(scaled_qvecs - scaled_prediction_qvec, axis=1) - distances_flip = np.linalg.norm(scaled_qvecs + scaled_prediction_qvec, axis=1) - ok_samples = (distances > cutoff) * (distances_flip > cutoff) - - return ok_samples - - -class B0MeanInputSpec(BaseInterfaceInputSpec): - b0_images = InputMultiObject(File(exists=True), mandatory=True) - - -class B0MeanOutputSpec(TraitedSpec): - average_image = File(exists=True) - - -class B0Mean(SimpleInterface): - input_spec = B0MeanInputSpec - output_spec = B0MeanOutputSpec - - def _run_interface(self, runtime): - b0_images = [nb.load(fname) for fname in self.inputs.b0_images] - b0_mean = np.stack([img.get_fdata() for img in b0_images], -1).mean(3) - mean_file = op.join(runtime.cwd, "b0_mean.nii.gz") - nb.Nifti1Image(b0_mean, b0_images[0].affine, b0_images[0].header).to_filename(mean_file) - self._results["average_image"] = mean_file - return runtime - - -class ExtractDWISForModelInputSpec(BaseInterfaceInputSpec): - dwi_files = InputMultiObject(File(exists=True)) - bval_files = InputMultiObject(File(exists=True)) - bvec_files = InputMultiObject(File(exists=True)) - transforms = InputMultiObject() - b0_indices = traits.List() - - -class ExtractDWISForModelOutputSpec(TraitedSpec): - model_dwi_files = OutputMultiObject(File(exists=True)) - model_bvals = OutputMultiObject(File(exists=True)) - model_bvecs = OutputMultiObject(File(exists=True)) - transforms = InputMultiObject() - - -class ExtractDWIsForModel(SimpleInterface): - """Take a DWI series with interspersed b0 images and create a model-ready version""" - - input_spec = ExtractDWISForModelInputSpec - output_spec = ExtractDWISForModelOutputSpec - - def _run_interface(self, runtime): - all_images = self.inputs.dwi_files - all_bvecs = self.inputs.bvec_files - all_bvals = self.inputs.bval_files - b0_indices = self.inputs.b0_indices - transforms = self.inputs.transforms - if not len(all_images) == len(all_bvecs) == len(all_bvals) == len(transforms): - raise Exception("Image, bval, bvec inputs must be of the same length") - ok_indices = [idx for idx in range(len(all_images)) if idx not in b0_indices] - self._results["model_dwi_files"] = [all_images[idx] for idx in ok_indices] - self._results["model_bvals"] = [all_bvals[idx] for idx in ok_indices] - self._results["model_bvecs"] = [all_bvecs[idx] for idx in ok_indices] - self._results["transforms"] = [transforms[idx] for idx in ok_indices] - - return runtime - - -def quick_load_images(image_list, dtype=np.float32): - example_img = nb.load(image_list[0]) - num_images = len(image_list) - output_matrix = np.zeros(tuple(example_img.shape) + (num_images,), dtype=dtype) - for image_num, image_path in enumerate(image_list): - output_matrix[..., image_num] = nb.load(image_path).get_fdata(dtype=dtype) - return output_matrix - - -class SignalPredictionInputSpec(BaseInterfaceInputSpec): - aligned_dwis = InputMultiObject(File(exists=True)) - aligned_bvecs = 
traits.Either(InputMultiObject(File(exists=True)), traits.Array) - bvals = traits.Either(InputMultiObject(File(exists=True)), traits.Array) - aligned_mask = File(exists=True, mandatory=True) - aligned_b0_mean = File(exists=True, mandatory=True) - bvec_to_predict = traits.Array() - bval_to_predict = traits.Float() - minimal_q_distance = traits.Float(2.0, usedefault=True) - model = traits.Str("3dSHORE", usedefault=True) - - -class SignalPredictionOutputSpec(TraitedSpec): - predicted_image = File(exists=True) - - -class SignalPrediction(SimpleInterface): - """ """ - - input_spec = SignalPredictionInputSpec - output_spec = SignalPredictionOutputSpec - - def _run_interface(self, runtime): - pred_vec = self.inputs.bvec_to_predict - pred_val = self.inputs.bval_to_predict - # Load the mask image: - mask_img = nb.load(self.inputs.aligned_mask) - mask_array = mask_img.get_fdata() > 1e-6 - all_images = self.inputs.aligned_dwis - if isinstance(self.inputs.aligned_bvecs, np.ndarray): - bvecs = self.inputs.aligned_bvecs - else: - bvecs = concatenate_bvecs(self.inputs.aligned_bvecs) - all_bvecs = np.row_stack([np.zeros(3)] + bvecs.tolist()) - if isinstance(self.inputs.bvals, np.ndarray): - bvals = self.inputs.bvals - else: - bvals = concatenate_bvals(self.inputs.bvals, None) - all_bvals = np.array([0.0] + bvals.tolist()) - - # Which sample points are too close to the one we want to predict? - training_mask = _nonoverlapping_qspace_samples( - pred_val, pred_vec, all_bvals, all_bvecs, self.inputs.minimal_q_distance - ) - training_indices = np.flatnonzero(training_mask[1:]) - training_image_paths = [self.inputs.aligned_b0_mean] + [ - all_images[idx] for idx in training_indices - ] - training_bvecs = all_bvecs[training_mask] - training_bvals = all_bvals[training_mask] - LOGGER.info("Training with %d of %d", training_mask.sum(), len(training_mask)) - - # Load training data and fit the model - training_data = quick_load_images(training_image_paths) - training_gtab = gradient_table(bvals=training_bvals, bvecs=training_bvecs) - # set up prediction variables - prediction_bvecs = np.tile(pred_vec, (10, 1)) - prediction_bvals = np.ones(10) * pred_val - prediction_bvals[9] = 0 # prevent warning - prediction_gtab = gradient_table(bvals=prediction_bvals, bvecs=prediction_bvecs) - if self.inputs.model == "3dSHORE": - shore_model = BrainSuiteShoreModel(training_gtab, regularization="L2") - shore_fit = shore_model.fit(training_data, mask=mask_array) - # Get the shore vector for the desired coordinate - prediction_shore = brainsuite_shore_basis( - shore_model.radial_order, shore_model.zeta, prediction_gtab, shore_model.tau - ) - prediction_dir = prediction_shore[0] - # Calculate the signal prediction, reshape to 3D and save - shore_array = shore_fit._shore_coef[mask_array] - output_data = np.zeros(mask_array.shape) - output_data[mask_array] = np.dot(shore_array, prediction_dir) - - elif self.inputs.model == "tensor": - dti_wls = dti.TensorModel(training_gtab) - fit_wls = dti_wls.fit(training_data, mask=mask_array) - dti_params = fit_wls.model_params - output_data = dti.tensor_prediction( - dti_params, prediction_gtab, training_data[:, :, :, 0] - )[..., 0] - - else: - raise NotImplementedError("Unsupported model: " + self.inputs.model) - - prediction_file = op.join( - runtime.cwd, - "predicted_b%d_%.2f_%.2f_%.2f.nii.gz" - % ((pred_val,) + tuple(np.round(pred_vec, decimals=2))), - ) - nb.Nifti1Image(output_data, mask_img.affine, mask_img.header).to_filename(prediction_file) - self._results["predicted_image"] = 
prediction_file - - return runtime - - -class CalculateCNRInputSpec(BaseInterfaceInputSpec): - hmc_warped_images = InputMultiObject(File(exists=True)) - predicted_images = InputMultiObject(File(exists=True)) - mask_image = File(exists=True) - - -class CalculateCNROutputSpec(TraitedSpec): - cnr_image = File(exists=True) - - -class CalculateCNR(SimpleInterface): - input_spec = CalculateCNRInputSpec - output_spec = CalculateCNROutputSpec - - def _run_interface(self, runtime): - cnr_file = op.join(runtime.cwd, "SHORELine_CNR.nii.gz") - model_images = quick_load_images(self.inputs.predicted_images) - observed_images = quick_load_images(self.inputs.hmc_warped_images) - mask_image = nb.load(self.inputs.mask_image) - mask = mask_image.get_fdata() > 1e-6 - signal_vals = model_images[mask] - b0 = signal_vals[:, 0][:, np.newaxis] - signal_vals = signal_vals / b0 - signal_var = np.var(signal_vals, 1) - observed_vals = observed_images[mask] / b0 - noise_var = np.var(signal_vals - observed_vals, 1) - snr = np.nan_to_num(signal_var / noise_var) - out_mat = np.zeros(mask_image.shape) - out_mat[mask] = snr - nb.Nifti1Image(out_mat, mask_image.affine, header=mask_image.header).to_filename(cnr_file) - self._results["cnr_image"] = cnr_file - return runtime - - -class ReorderOutputsInputSpec(BaseInterfaceInputSpec): - b0_indices = traits.List(mandatory=True) - b0_mean = File(exists=True, mandatory=True) - warped_b0_images = InputMultiObject(File(exists=True), mandatory=True) - warped_dwi_images = InputMultiObject(File(exists=True), mandatory=True) - initial_transforms = InputMultiObject(File(exists=True), mandatory=True) - model_based_transforms = InputMultiObject(traits.List(), mandatory=True) - model_predicted_images = InputMultiObject(File(exists=True), mandatory=True) - - -class ReorderOutputsOutputSpec(TraitedSpec): - full_transforms = OutputMultiObject(traits.List()) - full_predicted_dwi_series = OutputMultiObject(File(exists=True)) - hmc_warped_images = OutputMultiObject(File(exists=True)) - - -class ReorderOutputs(SimpleInterface): - input_spec = ReorderOutputsInputSpec - output_spec = ReorderOutputsOutputSpec - - def _run_interface(self, runtime): - full_transforms = [] - full_predicted_dwi_series = [] - full_warped_images = [] - warped_b0_images = self.inputs.warped_b0_images[::-1] - warped_dwi_images = self.inputs.warped_dwi_images[::-1] - model_transforms = self.inputs.model_based_transforms[::-1] - model_images = self.inputs.model_predicted_images[::-1] - b0_transforms = [self.inputs.initial_transforms[idx] for idx in self.inputs.b0_indices][ - ::-1 - ] - num_dwis = len(self.inputs.initial_transforms) - - for imagenum in range(num_dwis): - if imagenum in self.inputs.b0_indices: - full_predicted_dwi_series.append(self.inputs.b0_mean) - full_transforms.append(b0_transforms.pop()) - full_warped_images.append(warped_b0_images.pop()) - else: - full_transforms.append(model_transforms.pop()) - full_predicted_dwi_series.append(model_images.pop()) - full_warped_images.append(warped_dwi_images.pop()) - - if not len(model_transforms) == len(b0_transforms) == len(model_images) == 0: - raise Exception("Unable to recombine images and transforms") - - self._results["hmc_warped_images"] = full_warped_images - self._results["full_transforms"] = full_transforms - self._results["full_predicted_dwi_series"] = full_predicted_dwi_series - - return runtime - - -class IterationSummaryInputSpec(BaseInterfaceInputSpec): - collected_motion_files = InputMultiObject(File(exists=True)) - - -class 
IterationSummaryOutputSpec(TraitedSpec): - iteration_summary_file = File(exists=True) - plot_file = File(exists=True) - - -class IterationSummary(SummaryInterface): - input_spec = IterationSummaryInputSpec - output_spec = IterationSummaryOutputSpec - - def _run_interface(self, runtime): - motion_files = self.inputs.collected_motion_files - output_fname = op.join(runtime.cwd, "iteration_summary.csv") - fig_output_fname = op.join(runtime.cwd, "iterdiffs.svg") - if not isdefined(motion_files): - return runtime - - all_iters = [] - for fnum, fname in enumerate(motion_files): - df = pd.read_csv(fname) - df["iter_num"] = fnum - path_parts = fname.split(os.sep) - itername = "" if "iter" not in path_parts[-3] else path_parts[-3] - df["iter_name"] = itername - all_iters.append(df) - combined = pd.concat(all_iters, axis=0, ignore_index=True) - - combined.to_csv(output_fname, index=False) - self._results["iteration_summary_file"] = output_fname - - # Create a figure for the report - _iteration_summary_plot(combined, fig_output_fname) - self._results["plot_file"] = fig_output_fname - - return runtime - - -class SHORELineReportInputSpec(BaseInterfaceInputSpec): - iteration_summary = File(exists=True) - registered_images = InputMultiObject(File(exists=True)) - original_images = InputMultiObject(File(exists=True)) - model_predicted_images = InputMultiObject(File(exists=True)) - - -class SHORELineReportOutputSpec(SummaryOutputSpec): - plot_file = File(exists=True) - - -class SHORELineReport(SummaryInterface): - input_spec = SHORELineReportInputSpec - output_spec = SHORELineReportOutputSpec - - def _run_interface(self, runtime): - images = [] - for imagenum, (orig_file, aligned_file, model_file) in enumerate( - zip( - self.inputs.original_images, - self.inputs.registered_images, - self.inputs.model_predicted_images, - ) - ): - - images.extend(before_after_images(orig_file, aligned_file, model_file, imagenum)) - - out_file = op.join(runtime.cwd, "shoreline_reg.gif") - imageio.mimsave(out_file, images, duration=1000) - self._results["plot_file"] = out_file - return runtime - - -def scaled_mip(img1, img2, img3, axis): - mip1 = img1.max(axis=axis).T - mip2 = img2.max(axis=axis).T - mip3 = img3.max(axis=axis).T - max_obs = max(mip1.max(), mip2.max(), mip3.max()) - vmax = 0.98 * max_obs - return ( - np.clip(mip1, 0, vmax) / vmax, - np.clip(mip2, 0, vmax) / vmax, - np.clip(mip3, 0, vmax) / vmax, - ) - - -def to_image(fig): - fig.subplots_adjust(hspace=0, left=0, right=1, wspace=0) - fig.canvas.draw() # draw the canvas, cache the renderer - image = np.frombuffer(fig.canvas.tostring_rgb(), dtype="uint8") - image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - return image - - -def before_after_images(orig_file, aligned_file, model_file, imagenum): - fig, ax = plt.subplots(ncols=2, figsize=(10, 5)) - fig.subplots_adjust(hspace=0, left=0, right=1, wspace=0) - for _ax in ax: - _ax.clear() - orig_img = nb.load(orig_file).get_fdata() - aligned_img = nb.load(aligned_file).get_fdata() - model_img = nb.load(model_file).get_fdata() - orig_mip, aligned_mip, target_mip = scaled_mip(orig_img, aligned_img, model_img, 0) - - # Get contours for the orig, aligned images - orig_contours = measure.find_contours(orig_mip, 0.7) - aligned_contours = measure.find_contours(aligned_mip, 0.7) - target_contours = measure.find_contours(target_mip, 0.7) - - orig_contours_low = measure.find_contours(orig_mip, 0.05) - aligned_contours_low = measure.find_contours(aligned_mip, 0.05) - target_contours_low = 
measure.find_contours(target_mip, 0.05) - - # Plot before - ax[0].imshow(orig_mip, vmax=1.0, vmin=0, origin="lower", cmap="gray", interpolation="nearest") - ax[1].imshow( - target_mip, vmax=1.0, vmin=0, origin="lower", cmap="gray", interpolation="nearest" - ) - ax[0].text(1, 1, "%03d: Before" % imagenum, fontsize=16, color="white") - for contour in target_contours + target_contours_low: - ax[0].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#e7298a") - ax[1].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#e7298a") - for contour in orig_contours + orig_contours_low: - ax[1].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#d95f02") - ax[0].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#d95f02") - for axis in ax: - axis.set_xticks([]) - axis.set_yticks([]) - - before_image = to_image(fig) - - # Plot after - for _ax in ax: - _ax.clear() - ax[0].imshow( - aligned_mip, vmax=1.0, vmin=0, origin="lower", cmap="gray", interpolation="nearest" - ) - ax[1].imshow( - target_mip, vmax=1.0, vmin=0, origin="lower", cmap="gray", interpolation="nearest" - ) - ax[0].text(1, 1, "%03d: After" % imagenum, fontsize=16, color="white") - for contour in target_contours + target_contours_low: - ax[0].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#e7298a") - ax[1].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#e7298a") - for contour in aligned_contours + aligned_contours_low: - ax[1].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#d95f02") - ax[0].plot(contour[:, 1], contour[:, 0], linewidth=2, alpha=0.9, color="#d95f02") - for axis in ax: - axis.set_xticks([]) - axis.set_yticks([]) - after_image = to_image(fig) - - return before_image, after_image - - -def _iteration_summary_plot(iters_df, out_file): - iters = list([item[1] for item in iters_df.groupby("iter_num")]) - shift_cols = ["shiftX", "shiftY", "shiftZ"] - rotate_cols = ["rotateX", "rotateY", "rotateZ"] - shifts = np.stack([df[shift_cols] for df in iters], -1) - rotations = np.stack([df[rotate_cols] for df in iters], -1) - - rot_diffs = np.diff(rotations, axis=2).squeeze() - shift_diffs = np.diff(shifts, axis=2).squeeze() - if len(iters) == 2: - rot_diffs = rot_diffs[..., np.newaxis] - shift_diffs = shift_diffs[..., np.newaxis] - - shiftdiff_dfs = [] - rotdiff_dfs = [] - for diffnum, (rot_diff, shift_diff) in enumerate(zip(rot_diffs.T, shift_diffs.T)): - shiftdiff_df = pd.DataFrame(shift_diff.T, columns=shift_cols) - shiftdiff_df["difference_num"] = "%02d" % diffnum - shiftdiff_dfs.append(shiftdiff_df) - - rotdiff_df = pd.DataFrame(rot_diff.T, columns=rotate_cols) - rotdiff_df["difference_num"] = "%02d" % diffnum - rotdiff_dfs.append(rotdiff_df) - - shift_diffs = pd.concat(shiftdiff_dfs, axis=0) - rotate_diffs = pd.concat(rotdiff_dfs, axis=0) - - # Plot shifts - sns.set() - fig, ax = plt.subplots(ncols=2, figsize=(10, 5)) - sns.violinplot( - x="variable", - y="value", - hue="difference_num", - ax=ax[0], - data=shift_diffs.melt(id_vars=["difference_num"]), - ) - ax[0].set_ylabel("mm") - ax[0].set_title("Shift") - - # Plot rotations - sns.violinplot( - x="variable", - y="value", - hue="difference_num", - data=rotate_diffs.melt(id_vars=["difference_num"]), - ) - ax[1].set_ylabel("Degrees") - ax[1].set_title("Rotation") - sns.despine(offset=10, trim=True, fig=fig) - fig.savefig(out_file) diff --git a/qsirecon/interfaces/surf.py b/qsirecon/interfaces/surf.py deleted file mode 100644 index 9c092a18..00000000 --- 
a/qsirecon/interfaces/surf.py +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Handling surfaces ------------------ - -""" -import os - -import nibabel as nb -import numpy as np -from nipype.interfaces.base import ( - BaseInterfaceInputSpec, - File, - SimpleInterface, - TraitedSpec, - isdefined, -) - - -class NormalizeSurfInputSpec(BaseInterfaceInputSpec): - in_file = File(mandatory=True, exists=True, desc="Freesurfer-generated GIFTI file") - transform_file = File(exists=True, desc="FSL or LTA affine transform file") - - -class NormalizeSurfOutputSpec(TraitedSpec): - out_file = File(desc="output file with re-centered GIFTI coordinates") - - -class NormalizeSurf(SimpleInterface): - """ Normalizes a FreeSurfer-generated GIFTI image - - FreeSurfer includes an offset to the center of the brain volume that is not - respected by all software packages. - Normalization involves adding this offset to the coordinates of all - vertices, and zeroing out that offset, to ensure consistent behavior - across software packages. - In particular, this normalization is consistent with the Human Connectome - Project pipeline (see `AlgorithmSurfaceApplyAffine`_ and - `FreeSurfer2CaretConvertAndRegisterNonlinear`_), although the the HCP - may not zero out the offset. - - GIFTI files with ``midthickness``/``graymid`` in the name are also updated - to include the following metadata entries:: - - { - AnatomicalStructureSecondary: MidThickness, - GeometricType: Anatomical - } - - This interface is intended to be applied uniformly to GIFTI surface files - generated from the ``?h.white``/``?h.smoothwm`` and ``?h.pial`` surfaces, - as well as externally-generated ``?h.midthickness``/``?h.graymid`` files. - In principle, this should apply safely to any other surface, although it is - less relevant to surfaces that don't describe an anatomical structure. - - .. _AlgorithmSurfaceApplyAffine: https://github.com/Washington-University/workbench\ -/blob/1b79e56/src/Algorithms/AlgorithmSurfaceApplyAffine.cxx#L73-L91 - - .. 
_FreeSurfer2CaretConvertAndRegisterNonlinear: https://github.com/Washington-University/\
-Pipelines/blob/ae69b9a/PostFreeSurfer/scripts/FreeSurfer2CaretConvertAndRegisterNonlinear.sh\
-#L147-154
-
-    """
-
-    input_spec = NormalizeSurfInputSpec
-    output_spec = NormalizeSurfOutputSpec
-
-    def _run_interface(self, runtime):
-        transform_file = self.inputs.transform_file
-        if not isdefined(transform_file):
-            transform_file = None
-        self._results["out_file"] = normalize_surfs(
-            self.inputs.in_file, transform_file, newpath=runtime.cwd
-        )
-        return runtime
-
-
-def normalize_surfs(in_file, transform_file, newpath=None):
-    """Re-center GIFTI coordinates to align to native T1 space
-
-    For midthickness surfaces, add MidThickness metadata
-
-    Coordinate update based on:
-    https://github.com/Washington-University/workbench/blob/1b79e56/src/Algorithms/AlgorithmSurfaceApplyAffine.cxx#L73-L91
-    and
-    https://github.com/Washington-University/Pipelines/blob/ae69b9a/PostFreeSurfer/scripts/FreeSurfer2CaretConvertAndRegisterNonlinear.sh#L147
-    """
-
-    img = nb.load(in_file)
-    transform = load_transform(transform_file)
-    pointset = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET")[0]
-    coords = pointset.data.T
-    c_ras_keys = ("VolGeomC_R", "VolGeomC_A", "VolGeomC_S")
-    ras = np.array([[float(pointset.metadata[key])] for key in c_ras_keys])
-    ones = np.ones((1, coords.shape[1]), dtype=coords.dtype)
-    # Apply C_RAS translation to coordinates, then transform
-    pointset.data = transform.dot(np.vstack((coords + ras, ones)))[:3].T.astype(coords.dtype)
-
-    secondary = nb.gifti.GiftiNVPairs("AnatomicalStructureSecondary", "MidThickness")
-    geom_type = nb.gifti.GiftiNVPairs("GeometricType", "Anatomical")
-    has_ass = has_geo = False
-    for nvpair in pointset.meta.data:
-        # Remove C_RAS translation from metadata to avoid double-dipping in FreeSurfer
-        if nvpair.name in c_ras_keys:
-            nvpair.value = "0.000000"
-        # Check for missing metadata
-        elif nvpair.name == secondary.name:
-            has_ass = True
-        elif nvpair.name == geom_type.name:
-            has_geo = True
-    fname = os.path.basename(in_file)
-    # Update metadata for MidThickness/graymid surfaces
-    if "midthickness" in fname.lower() or "graymid" in fname.lower():
-        if not has_ass:
-            pointset.meta.data.insert(1, secondary)
-        if not has_geo:
-            pointset.meta.data.insert(2, geom_type)
-
-    if newpath is None:
-        newpath = os.getcwd()
-    out_file = os.path.join(newpath, fname)
-    img.to_filename(out_file)
-    return out_file
-
-
-def load_transform(fname):
-    """Load affine transform from file
-
-    Parameters
-    ----------
-    fname : str or None
-        Filename of an LTA or FSL-style MAT transform file.
- If ``None``, return an identity transform - - Returns - ------- - affine : (4, 4) numpy.ndarray - """ - if fname is None: - return np.eye(4) - - if fname.endswith(".mat"): - return np.loadtxt(fname) - elif fname.endswith(".lta"): - with open(fname, "rb") as fobj: - for line in fobj: - if line.startswith(b"1 4 4"): - break - lines = fobj.readlines()[:4] - return np.genfromtxt(lines) - - raise ValueError("Unknown transform type; pass FSL (.mat) or LTA (.lta)") diff --git a/qsirecon/interfaces/tortoise.py b/qsirecon/interfaces/tortoise.py index dcab248f..0c48b5e2 100644 --- a/qsirecon/interfaces/tortoise.py +++ b/qsirecon/interfaces/tortoise.py @@ -8,36 +8,19 @@ import os.path as op import subprocess -import nibabel as nb import nilearn.image as nim -import numpy as np -import pandas as pd -from nipype.interfaces import ants from nipype.interfaces.base import ( BaseInterfaceInputSpec, CommandLine, CommandLineInputSpec, File, InputMultiObject, - OutputMultiObject, SimpleInterface, TraitedSpec, isdefined, traits, ) from nipype.utils.filemanip import fname_presuffix -from niworkflows.viz.utils import compose_view, cuts_from_bbox - -from ..viz.utils import plot_denoise -from .denoise import ( - SeriesPreprocReport, - SeriesPreprocReportInputSpec, - SeriesPreprocReportOutputSpec, -) -from .epi_fmap import get_best_b0_topup_inputs_from, safe_get_3d_image -from .fmap import get_distortion_grouping -from .gradients import write_concatenated_fsl_gradients -from .images import split_bvals_bvecs, to_lps LOGGER = logging.getLogger("nipype.interface") @@ -83,127 +66,6 @@ def run(self, **inputs): return super(TORTOISECommandLine, self).run(**inputs) -class _GatherDRBUDDIInputsInputSpec(TORTOISEInputSpec): - dwi_files = InputMultiObject(File(exists=True)) - original_files = InputMultiObject(File(exists=True)) - bval_files = traits.Either(InputMultiObject(File(exists=True)), File(exists=True)) - bvec_files = traits.Either(InputMultiObject(File(exists=True)), File(exists=True)) - original_files = InputMultiObject(File(exists=True)) - b0_threshold = traits.CInt(100, usedefault=True) - epi_fmaps = InputMultiObject( - File(exists=True), desc="files from fmaps/ for distortion correction" - ) - raw_image_sdc = traits.Bool(True, usedefault=True) - fieldmap_type = traits.Enum("epi", "rpe_series", mandatory=True) - dwi_series_pedir = traits.Enum("i", "i-", "j", "j-", "k", "k-", mandatory=True) - - -class _GatherDRBUDDIInputsOutputSpec(TraitedSpec): - blip_up_image = File(exists=True) - blip_up_bmat = File(exists=True) - blip_up_json = File(exists=True) - blip_down_image = File(exists=True) - blip_down_bmat = File(exists=True) - blip_assignments = traits.List() - report = traits.Str() - - -class GatherDRBUDDIInputs(SimpleInterface): - input_spec = _GatherDRBUDDIInputsInputSpec - output_spec = _GatherDRBUDDIInputsOutputSpec - - def _run_interface(self, runtime): - - # Write the metadata - up_json = op.join(runtime.cwd, "blip_up.json") - with open(up_json, "w") as up_jsonf: - up_jsonf.write('{"PhaseEncodingDirection": "%s"}\n' % self.inputs.dwi_series_pedir) - self._results["blip_up_json"] = up_json - - # Coerce the bvals and bvecs into lists of files - if isinstance(self.inputs.bval_files, list) and len(self.inputs.bval_files) == 1: - bval_files, bvec_files = split_bvals_bvecs( - self.inputs.bval_files[0], - self.inputs.bvec_files[0], - deoblique=False, - img_files=self.inputs.dwi_files, - working_dir=runtime.cwd, - ) - else: - bval_files, bvec_files = self.inputs.bval_files, self.inputs.bvec_files - - if 
self.inputs.fieldmap_type == "rpe_series":
-            (
-                self._results["blip_assignments"],
-                self._results["blip_up_image"],
-                self._results["blip_up_bmat"],
-                self._results["blip_down_image"],
-                self._results["blip_down_bmat"],
-            ) = split_into_up_and_down_niis(
-                dwi_files=self.inputs.dwi_files,
-                bval_files=bval_files,
-                bvec_files=bvec_files,
-                original_images=self.inputs.original_files,
-                prefix=op.join(runtime.cwd, "drbuddi"),
-                make_bmat=True,
-            )
-
-        elif self.inputs.fieldmap_type == "epi":
-            # Use the same function that was used to get images for TOPUP, but get the images
-            # directly from the CSV
-            _, _, _, b0_csv, _, _ = get_best_b0_topup_inputs_from(
-                dwi_file=self.inputs.dwi_files,
-                bval_file=bval_files,
-                b0_threshold=self.inputs.b0_threshold,
-                cwd=runtime.cwd,
-                bids_origin_files=self.inputs.original_files,
-                epi_fmaps=self.inputs.epi_fmaps,
-                max_per_spec=True,
-                raw_image_sdc=self.inputs.raw_image_sdc,
-            )
-
-            b0s_df = pd.read_csv(b0_csv)
-            selected_images = b0s_df[b0s_df.selected_for_sdc].reset_index(drop=True)
-            up_row = selected_images.loc[0]
-            down_row = selected_images.loc[1]
-            up_img = to_lps(safe_get_3d_image(up_row.bids_origin_file, up_row.original_volume))
-            up_img.set_data_dtype("float32")
-            down_img = to_lps(
-                safe_get_3d_image(down_row.bids_origin_file, down_row.original_volume)
-            )
-            down_img.set_data_dtype("float32")
-
-            # Save the images
-            blip_up_nii = op.join(runtime.cwd, "blip_up_b0.nii")
-            blip_down_nii = op.join(runtime.cwd, "blip_down_b0.nii")
-            up_img.to_filename(blip_up_nii)
-            down_img.to_filename(blip_down_nii)
-            self._results["blip_up_image"] = blip_up_nii
-            self._results["blip_down_image"] = blip_down_nii
-            self._results["blip_assignments"] = split_into_up_and_down_niis(
-                dwi_files=self.inputs.dwi_files,
-                bval_files=bval_files,
-                bvec_files=bvec_files,
-                original_images=self.inputs.original_files,
-                prefix=op.join(runtime.cwd, "drbuddi"),
-                make_bmat=False,
-                assignments_only=True,
-            )
-            self._results["blip_up_bmat"] = write_dummy_bmtxt(blip_up_nii)
-            self._results["blip_down_bmat"] = write_dummy_bmtxt(blip_down_nii)
-
-        return runtime
-
-
-def write_dummy_bmtxt(nii_file):
-    new_fname = fname_presuffix(nii_file, suffix=".bmtxt", use_ext=False)
-    img = nim.load_img(nii_file)
-    nvols = 1 if img.ndim < 4 else img.shape[3]
-    with open(new_fname, "w") as bmtxt_f:
-        bmtxt_f.write("\n".join(["0 0 0 0 0 0"] * nvols) + "\n")
-    return new_fname
-
-
 class _DRBUDDIInputSpec(TORTOISEInputSpec):
     num_threads = traits.Int(
         desc="number of OMP threads",
@@ -349,234 +211,6 @@ def _list_outputs(self):
         return outputs
-
-
-class _DRBUDDIAggregateOutputsInputSpec(TORTOISEInputSpec):
-    blip_assignments = traits.List()
-    undistorted_reference = File(exists=True)
-    bdown_to_bup_rigid_trans_h5 = File(exists=True)
-    undistorted_reference = File(exists=True)
-    blip_down_b0 = File(exists=True)
-    blip_down_b0_corrected = File(exists=True)
-    blip_down_b0_corrected_jac = File(exists=True)
-    blip_down_b0_quad = File(exists=True)
-    blip_up_b0 = File(exists=True)
-    blip_up_b0_corrected = File(exists=True)
-    blip_up_b0_corrected_jac = File(exists=True)
-    blip_up_b0_quad = File(exists=True)
-    deformation_finv = File(exists=True, desc="blip up to b0_corrected")
-    deformation_minv = File(exists=True)
-    blip_up_FA = File(exists=True)
-    blip_down_FA = File(exists=True)
-    fieldmap_type = traits.Enum("epi", "rpe_series", mandatory=True)
-    structural_image = File(exists=True)
-    wm_seg = File(exists=True, desc="White matter segmentation image")
-
-
-class 
_DRBUDDIAggregateOutputsOutputSpec(TraitedSpec): - # Aggregated outputs for convenience - sdc_warps = OutputMultiObject(File(exists=True)) - sdc_scaling_images = OutputMultiObject(File(exists=True)) - # Fieldmap outputs for the reports - up_fa_corrected_image = File(exists=True) - down_fa_corrected_image = File(exists=True) - # The best image for coregistration to the corrected DWI - b0_ref = File(exists=True) - - -class DRBUDDIAggregateOutputs(SimpleInterface): - input_spec = _DRBUDDIAggregateOutputsInputSpec - output_spec = _DRBUDDIAggregateOutputsOutputSpec - - def _run_interface(self, runtime): - - # If the structural image has been used, return that as the b0ref, otherwise - # it's the b0_corrected_final - self._results["b0_ref"] = ( - self.inputs.structural_image - if isdefined(self.inputs.structural_image) - else self.inputs.undistorted_reference - ) - - # there may be 2 transforms for the blip down data. If so, compose them - if isdefined(self.inputs.bdown_to_bup_rigid_trans_h5): - # combine the rigid with displacement - down_warp = op.join(runtime.cwd, "blip_down_composite.nii.gz") - xfm = ants.ApplyTransforms( - # input_image is ignored because print_out_composite_warp_file is True - input_image=self.inputs.blip_down_b0, - transforms=[self.inputs.deformation_minv, self.inputs.bdown_to_bup_rigid_trans_h5], - reference_image=self.inputs.undistorted_reference, - output_image=down_warp, - print_out_composite_warp_file=True, - interpolation="LanczosWindowedSinc", - ) - xfm.terminal_output = "allatonce" - xfm.resource_monitor = False - _ = xfm.run() - else: - down_warp = self.inputs.deformation_minv - - # Calculate the scaling images - scaling_blip_up_file = op.join(runtime.cwd, "blip_up_scale.nii.gz") - scaling_blip_down_file = op.join(runtime.cwd, "blip_down_scale.nii.gz") - scaling_blip_up_img = nim.math_img( - "a/b", a=self.inputs.undistorted_reference, b=self.inputs.blip_up_b0_corrected - ) - scaling_blip_up_img.to_filename(scaling_blip_up_file) - scaling_blip_down_img = nim.math_img( - "a/b", a=self.inputs.undistorted_reference, b=self.inputs.blip_down_b0_corrected - ) - scaling_blip_down_img.to_filename(scaling_blip_down_file) - - self._results["sdc_warps"] = [ - self.inputs.deformation_finv if blip_dir == "up" else down_warp - for blip_dir in self.inputs.blip_assignments - ] - self._results["sdc_scaling_images"] = [ - scaling_blip_up_file if blip_dir == "up" else scaling_blip_down_file - for blip_dir in self.inputs.blip_assignments - ] - - if self.inputs.fieldmap_type == "rpe_series": - fa_up_warped = fname_presuffix( - self.inputs.blip_up_FA, newpath=runtime.cwd, suffix="_corrected" - ) - xfm_fa_up = ants.ApplyTransforms( - # input_image is ignored because print_out_composite_warp_file is True - input_image=self.inputs.blip_up_FA, - transforms=[self.inputs.deformation_finv], - reference_image=self.inputs.undistorted_reference, - output_image=fa_up_warped, - interpolation="NearestNeighbor", - ) - xfm_fa_up.terminal_output = "allatonce" - xfm_fa_up.resource_monitor = False - xfm_fa_up.run() - - fa_down_warped = fname_presuffix( - self.inputs.blip_down_FA, newpath=runtime.cwd, suffix="_corrected" - ) - xfm_fa_down = ants.ApplyTransforms( - # input_image is ignored because print_out_composite_warp_file is True - input_image=self.inputs.blip_down_FA, - transforms=[self.inputs.deformation_minv, self.inputs.bdown_to_bup_rigid_trans_h5], - reference_image=self.inputs.undistorted_reference, - output_image=fa_down_warped, - interpolation="NearestNeighbor", - ) - 
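            # Editorial gloss, not part of the original interface: unlike the
            # composite-warp call earlier in this method, these two
            # ApplyTransforms invocations actually resample the blip-up and
            # blip-down FA maps into the corrected space for the report, so the
            # "input_image is ignored" comments they carry are copy-paste
            # remnants from the composite-warp pattern. NearestNeighbor keeps
            # the FA values unblended, and the blip-down FA additionally has
            # the rigid transform composed with its displacement field.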
xfm_fa_down.terminal_output = "allatonce" - xfm_fa_down.resource_monitor = False - xfm_fa_down.run() - self._results["up_fa_corrected_image"] = fa_up_warped - self._results["down_fa_corrected_image"] = fa_down_warped - - return runtime - - -class _GibbsInputSpec(TORTOISEInputSpec, SeriesPreprocReportInputSpec): - """Gibbs input_nifti output_nifti kspace_coverage(1,0.875,0.75) - phase_encoding_dir nsh minW(optional) maxW(optional)""" - - in_file = traits.File(exists=True, mandatory=True, position=0, argstr="%s") - out_file = traits.File( - argstr="%s", - position=1, - name_source="in_file", - name_template="%s_unrung.nii", - use_extension=False, - ) - kspace_coverage = traits.Float(mandatory=True, position=2, argstr="%.4f") - phase_encoding_dir = traits.Enum( - 0, 1, mandatory=True, argstr="%d", position=3, desc="0: horizontal, 1:vertical" - ) - nsh = traits.Int(argstr="%d", position=4) - min_w = traits.Int() - mask = File() - num_threads = traits.Int(1, usedefault=True, nohash=True) - - -class _GibbsOutputSpec(SeriesPreprocReportOutputSpec): - out_file = File(exists=True) - - -class Gibbs(SeriesPreprocReport, TORTOISECommandLine): - input_spec = _GibbsInputSpec - output_spec = _GibbsOutputSpec - _cmd = "Gibbs" - - def _get_plotting_images(self): - input_dwi = nim.load_img(self.inputs.in_file) - outputs = self._list_outputs() - ref_name = outputs.get("out_file") - denoised_nii = nim.load_img(ref_name) - return input_dwi, denoised_nii, None - - def _generate_report(self): - """Generate a reportlet.""" - LOGGER.info("Generating denoising visual report") - - input_dwi, denoised_nii, _ = self._get_plotting_images() - - # find an image to use as the background - image_data = input_dwi.get_fdata() - image_intensities = np.array([img.mean() for img in image_data.T]) - lowb_index = int(np.argmax(image_intensities)) - highb_index = int(np.argmin(image_intensities)) - - # Original images - orig_lowb_nii = input_dwi.slicer[..., lowb_index] - orig_highb_nii = input_dwi.slicer[..., highb_index] - - # Denoised images - denoised_lowb_nii = denoised_nii.slicer[..., lowb_index] - denoised_highb_nii = denoised_nii.slicer[..., highb_index] - - # Find spatial extent of the image - contour_nii = mask_nii = None - if isdefined(self.inputs.mask): - contour_nii = nim.load_img(self.inputs.mask) - else: - mask_nii = nim.threshold_img(denoised_lowb_nii, 50) - cuts = cuts_from_bbox(contour_nii or mask_nii, cuts=self._n_cuts) - - diff_lowb_nii = nb.Nifti1Image( - orig_lowb_nii.get_fdata() - denoised_lowb_nii.get_fdata(), - affine=denoised_lowb_nii.affine, - ) - diff_highb_nii = nb.Nifti1Image( - orig_highb_nii.get_fdata() - denoised_highb_nii.get_fdata(), - affine=denoised_highb_nii.affine, - ) - - # Call composer - compose_view( - plot_denoise( - denoised_lowb_nii, - denoised_highb_nii, - "moving-image", - estimate_brightness=True, - cuts=cuts, - label="De-Gibbs", - lowb_contour=None, - highb_contour=None, - compress=False, - ), - plot_denoise( - diff_lowb_nii, - diff_highb_nii, - "fixed-image", - estimate_brightness=True, - cuts=cuts, - label="Estimated Ringing", - lowb_contour=None, - highb_contour=None, - compress=False, - ), - out_file=self._out_report, - ) - - self._calculate_nmse(input_dwi, denoised_nii) - - class _TORTOISEConvertInputSpec(BaseInterfaceInputSpec): bval_file = File(exists=True, mandatory=True, copyfile=True) bvec_file = File(exists=True, mandatory=True, copyfile=True) @@ -861,130 +495,7 @@ class ComputeMAPMRI_NG(TORTOISEReconCommandLine): _suffix_map = {"ng_file": "_NG", "ngpar_file": "_NGpar", 
"ngperp_file": "_NGperp"} -def split_into_up_and_down_niis( - dwi_files, - bval_files, - bvec_files, - original_images, - prefix, - make_bmat=True, - assignments_only=False, -): - """Takes the concatenated output from pre_hmc_wf and split it into "up" and "down" - decompressed nifti files with float32 datatypes.""" - group_names, group_assignments = get_distortion_grouping(original_images) - - if not len(set(group_names)) == 2 and not assignments_only: - raise Exception("DRBUDDI requires exactly one blip up and one blip down") - - up_images = [] - up_bvals = [] - up_bvecs = [] - up_prefix = prefix + "_up_dwi" - up_dwi_file = up_prefix + ".nii" - up_bmat_file = up_prefix + ".bmtxt" - down_images = [] - down_bvals = [] - down_bvecs = [] - down_prefix = prefix + "_down_dwi" - down_dwi_file = down_prefix + ".nii" - down_bmat_file = down_prefix + ".bmtxt" - - # We know up is first because we concatenated them ourselves - up_group_name = group_assignments[0] - blip_assignments = [] - for dwi_file, bval_file, bvec_file, distortion_group in zip( - dwi_files, bval_files, bvec_files, group_assignments - ): - - if distortion_group == up_group_name: - up_images.append(dwi_file) - up_bvals.append(bval_file) - up_bvecs.append(bvec_file) - blip_assignments.append("up") - else: - down_images.append(dwi_file) - down_bvals.append(bval_file) - down_bvecs.append(bvec_file) - blip_assignments.append("down") - - if assignments_only: - return blip_assignments - - # Write the 4d up image - up_4d = nim.concat_imgs(up_images, dtype="float32", auto_resample=False) - up_4d.set_data_dtype("float32") - up_4d.to_filename(up_dwi_file) - up_bval_file, up_bvec_file = write_concatenated_fsl_gradients(up_bvals, up_bvecs, up_prefix) - - # Write the 4d down image - down_4d = nim.concat_imgs(down_images, dtype="float32", auto_resample=False) - down_4d.set_data_dtype("float32") - down_4d.to_filename(down_dwi_file) - down_bval_file, down_bvec_file = write_concatenated_fsl_gradients( - down_bvals, down_bvecs, down_prefix - ) - - # Send back FSL-style gradients - if not make_bmat: - return ( - blip_assignments, - up_dwi_file, - up_bval_file, - up_bvec_file, - down_dwi_file, - down_bval_file, - down_bvec_file, - ) - - # Convert to bmatrix text file - make_bmat_file(up_bval_file, up_bvec_file) - make_bmat_file(down_bval_file, down_bvec_file) - - return blip_assignments, up_dwi_file, up_bmat_file, down_dwi_file, down_bmat_file - - def make_bmat_file(bvals, bvecs): pout = subprocess.run(["FSLBVecsToTORTOISEBmatrix", op.abspath(bvals), op.abspath(bvecs)]) print(pout) return bvals.replace("bval", "bmtxt") - - -def generate_drbuddi_boilerplate(fieldmap_type, t2w_sdc, with_topup=False): - """Generate boilerplate that describes how DRBUDDI is being used.""" - - desc = ["\n\nDRBUDDI [@drbuddi], part of the TORTOISE [@tortoisev3] software package,"] - if not with_topup: - # Until now there will have been no description of the SDC procedure. - # Add extra details about the input data. - desc.append( - "was used to perform susceptibility distortion correction. " - "Data was collected with reversed phase-encode blips, resulting " - "in pairs of images with distortions going in opposite directions." 
- ) - else: - desc += ["was used to perform a second stage of distortion correction."] - - # Describe what's going on - if fieldmap_type == "epi": - desc.append( - "DRBUDDI used b=0 reference images with reversed " - "phase encoding directions to estimate" - ) - else: - desc.append( - "DRBUDDI used multiple motion-corrected DWI series acquired " - "with opposite phase encoding " - "directions. A b=0 image **and** the Fractional Anisotropy " - "images from both phase encoding diesctions were used together in " - "a multi-modal registration to estimate" - ) - desc.append("the susceptibility-induced off-resonance field.") - - if t2w_sdc: - desc.append("A T2-weighted image was included in the multimodal registration.") - desc.append( - "Signal intensity was adjusted " - "in the final interpolated images using a method similar to LSR.\n\n" - ) - return " ".join(desc) diff --git a/qsirecon/interfaces/utils.py b/qsirecon/interfaces/utils.py index d97bba36..308dd4a1 100644 --- a/qsirecon/interfaces/utils.py +++ b/qsirecon/interfaces/utils.py @@ -10,21 +10,16 @@ import os -import nibabel as nb -import numpy as np -import scipy.ndimage as nd from nipype import logging from nipype.interfaces import ants from nipype.interfaces.base import ( BaseInterfaceInputSpec, - DynamicTraitedSpec, File, SimpleInterface, TraitedSpec, isdefined, traits, ) -from nipype.interfaces.io import add_traits from nipype.utils.filemanip import fname_presuffix from ..utils.atlases import get_atlases @@ -127,194 +122,3 @@ def label_convert(original_atlas, output_mif, orig_txt, mrtrix_txt, metadata): mrtrix_f.write("{}\t{}\n".format(row_num + 1, roi_name)) cmd = ["labelconvert", original_atlas, orig_txt, mrtrix_txt, output_mif] os.system(" ".join(cmd)) - - -class AddTSVHeaderInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input file") - columns = traits.List(traits.Str, mandatory=True, desc="header for columns") - - -class AddTSVHeaderOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output average file") - - -class AddTSVHeader(SimpleInterface): - """Add a header row to a TSV file - - .. testsetup:: - - >>> import os - >>> import pandas as pd - >>> import numpy as np - >>> from tempfile import TemporaryDirectory - >>> tmpdir = TemporaryDirectory() - >>> os.chdir(tmpdir.name) - - .. doctest:: - - An example TSV: - - >>> np.savetxt('data.tsv', np.arange(30).reshape((6, 5)), delimiter='\t') - - Add headers: - - >>> from qsirecon.interfaces import AddTSVHeader - >>> addheader = AddTSVHeader() - >>> addheader.inputs.in_file = 'data.tsv' - >>> addheader.inputs.columns = ['a', 'b', 'c', 'd', 'e'] - >>> res = addheader.run() - >>> pd.read_csv(res.outputs.out_file, sep='\s+', index_col=None, - ... engine='python') # doctest: +NORMALIZE_WHITESPACE - a b c d e - 0 0.0 1.0 2.0 3.0 4.0 - 1 5.0 6.0 7.0 8.0 9.0 - 2 10.0 11.0 12.0 13.0 14.0 - 3 15.0 16.0 17.0 18.0 19.0 - 4 20.0 21.0 22.0 23.0 24.0 - 5 25.0 26.0 27.0 28.0 29.0 - - .. 
testcleanup:: - - >>> tmpdir.cleanup() - - """ - - input_spec = AddTSVHeaderInputSpec - output_spec = AddTSVHeaderOutputSpec - - def _run_interface(self, runtime): - out_file = fname_presuffix( - self.inputs.in_file, suffix="_motion.tsv", newpath=runtime.cwd, use_ext=False - ) - data = np.loadtxt(self.inputs.in_file) - np.savetxt( - out_file, data, delimiter="\t", header="\t".join(self.inputs.columns), comments="" - ) - - self._results["out_file"] = out_file - return runtime - - -class TestInputInputSpec(BaseInterfaceInputSpec): - test1 = traits.Any() - - -class TestInput(SimpleInterface): - input_spec = TestInputInputSpec - - def _run_interface(self, runtime): - return runtime - - -class ConcatAffinesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - invert = traits.Bool(False, usedefault=True, desc="Invert output transform") - - -class ConcatAffinesOutputSpec(TraitedSpec): - out_mat = File(exists=True, desc="Output transform") - - -class ConcatAffines(SimpleInterface): - input_spec = ConcatAffinesInputSpec - output_spec = ConcatAffinesOutputSpec - - def __init__(self, num_affines=0, *args, **kwargs): - super(ConcatAffines, self).__init__(*args, **kwargs) - self._num_affines = num_affines - trait_type = File(exists=True) - if num_affines == 0: - add_traits(self.inputs, ["mat_list"], trait_type) - elif num_affines < 26: - add_traits(self.inputs, self._get_names(num_affines), trait_type) - - @staticmethod - def _get_names(num_affines): - A = ord("A") - 1 - return ["mat_{}to{}".format(chr(X), chr(X + 1)) for X in range(A + num_affines, A, -1)] - - def _run_interface(self, runtime): - out_mat = os.path.join(runtime.cwd, "concat.mat") - in_list = [self.inputs.get()[name] for name in self._get_names(self._num_affines)] - - out_xfm = _concat_xfms(in_list, invert=self.inputs.invert) - np.savetxt(out_mat, out_xfm, fmt=str("%.12g")) - - self._results["out_mat"] = out_mat - return runtime - - -def _tpm2roi( - in_tpm, - in_mask, - mask_erosion_mm=None, - erosion_mm=None, - mask_erosion_prop=None, - erosion_prop=None, - pthres=0.95, - newpath=None, -): - """ - Generate a mask from a tissue probability map - """ - tpm_img = nb.load(in_tpm) - roi_mask = (tpm_img.get_fdata() >= pthres).astype(np.uint8) - - eroded_mask_file = None - erode_in = ( - mask_erosion_mm is not None - and mask_erosion_mm > 0 - or mask_erosion_prop is not None - and mask_erosion_prop < 1 - ) - if erode_in: - eroded_mask_file = fname_presuffix(in_mask, suffix="_eroded", newpath=newpath) - mask_img = nb.load(in_mask) - mask_data = mask_img.get_fdata().astype(np.uint8) - if mask_erosion_mm: - iter_n = max(int(mask_erosion_mm / max(mask_img.header.get_zooms())), 1) - mask_data = nd.binary_erosion(mask_data, iterations=iter_n) - else: - orig_vol = np.sum(mask_data > 0) - while np.sum(mask_data > 0) / orig_vol > mask_erosion_prop: - mask_data = nd.binary_erosion(mask_data, iterations=1) - - # Store mask - eroded = nb.Nifti1Image(mask_data, mask_img.affine, mask_img.header) - eroded.set_data_dtype(np.uint8) - eroded.to_filename(eroded_mask_file) - - # Mask TPM data (no effect if not eroded) - roi_mask[~mask_data] = 0 - - # shrinking - erode_out = ( - erosion_mm is not None and erosion_mm > 0 or erosion_prop is not None and erosion_prop < 1 - ) - if erode_out: - if erosion_mm: - iter_n = max(int(erosion_mm / max(tpm_img.header.get_zooms())), 1) - iter_n = int(erosion_mm / max(tpm_img.header.get_zooms())) - roi_mask = nd.binary_erosion(roi_mask, iterations=iter_n) - else: - orig_vol = np.sum(roi_mask > 0) - while np.sum(roi_mask > 0) 
/ orig_vol > erosion_prop: - roi_mask = nd.binary_erosion(roi_mask, iterations=1) - - # Create image to resample - roi_fname = fname_presuffix(in_tpm, suffix="_roi", newpath=newpath) - roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header) - roi_img.set_data_dtype(np.uint8) - roi_img.to_filename(roi_fname) - return roi_fname, eroded_mask_file or in_mask - - -def _concat_xfms(in_list, invert): - transforms = [np.loadtxt(in_mat) for in_mat in in_list] - out_xfm = transforms.pop(0) - for xfm in transforms: - out_xfm = out_xfm.dot(xfm) - - if invert: - out_xfm = np.linalg.inv(out_xfm) - - return out_xfm diff --git a/qsirecon/utils/__init__.py b/qsirecon/utils/__init__.py index c19439e7..a2db023f 100644 --- a/qsirecon/utils/__init__.py +++ b/qsirecon/utils/__init__.py @@ -2,5 +2,4 @@ # -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: -from .bids import collect_data from .misc import check_deps diff --git a/qsirecon/utils/bids.py b/qsirecon/utils/bids.py index 319ddb3f..6cf27819 100644 --- a/qsirecon/utils/bids.py +++ b/qsirecon/utils/bids.py @@ -40,37 +40,8 @@ import warnings from pathlib import Path -import nibabel as nb -import numpy as np from bids import BIDSLayout -IMPORTANT_DWI_FIELDS = [ - # From image headers: - "Obliquity", - "ImageOrientation", - "NumVolumes", - "Dim1Size", - "Dim2Size", - "Dim3Size", - "VoxelSizeDim1", - "VoxelSizeDim2", - "VoxelSizeDim3", - # From sidecars: - "ParallelReductionFactorInPlane", - "ParallelAcquisitionTechnique", - "ParallelAcquisitionTechnique", - "PartialFourier", - "PhaseEncodingDirection", - "EffectiveEchoSpacing", - "TotalReadoutTime", - "EchoTime", - "SliceEncodingDirection", - "DwellTime", - "FlipAngle", - "MultibandAccelerationFactor", - "RepetitionTime", -] - class BIDSError(ValueError): def __init__(self, message, bids_root): @@ -177,41 +148,6 @@ def collect_participants(bids_dir, participant_label=None, strict=False, bids_va return found_label -def collect_data(bids_dir, participant_label, filters=None, bids_validate=True): - """Use pybids to retrieve the input data for a given participant.""" - if isinstance(bids_dir, BIDSLayout): - layout = bids_dir - else: - layout = BIDSLayout(str(bids_dir), validate=bids_validate) - - queries = { - "fmap": {"datatype": "fmap"}, - "sbref": {"datatype": "func", "suffix": "sbref"}, - "flair": {"datatype": "anat", "suffix": "FLAIR"}, - "t2w": {"datatype": "anat", "suffix": "T2w"}, - "t1w": {"datatype": "anat", "suffix": "T1w"}, - "roi": {"datatype": "anat", "suffix": "roi"}, - "dwi": {"datatype": "dwi", "part": ["mag", None], "suffix": "dwi"}, - } - bids_filters = filters or {} - for acq, entities in bids_filters.items(): - queries[acq].update(entities) - - subj_data = { - dtype: sorted( - layout.get( - return_type="file", - subject=participant_label, - extension=["nii", "nii.gz"], - **query, - ) - ) - for dtype, query in queries.items() - } - - return subj_data, layout - - def write_derivative_description(bids_dir, deriv_dir): from qsirecon import __version__ @@ -372,30 +308,3 @@ def validate_input_dir(exec_env, bids_dir, participant_label): def _get_shub_version(singularity_url): raise ValueError("Not yet implemented") - - -def update_metadata_from_nifti_header(metadata, nifti_file): - """Update a BIDS metadata dictionary with info from a NIfTI header. - - Code borrowed from CuBIDS. 
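
    A minimal usage sketch (editorial illustration; the filename is
    hypothetical and the doctest is not executed):

    >>> metadata = {"EchoTime": 0.089}  # doctest: +SKIP
    >>> update_metadata_from_nifti_header(metadata, "sub-1_dwi.nii.gz")  # doctest: +SKIP
    >>> sorted(key for key in metadata if key.startswith("Dim"))  # doctest: +SKIP
    ['Dim1Size', 'Dim2Size', 'Dim3Size']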
- """ - img = nb.load(nifti_file) - # get important info from niftis - obliquity = np.any(nb.affines.obliquity(img.affine) > 1e-4) - voxel_sizes = img.header.get_zooms() - matrix_dims = img.shape - # add nifti info to corresponding sidecars​ - - metadata["Obliquity"] = str(obliquity) - metadata["VoxelSizeDim1"] = float(voxel_sizes[0]) - metadata["VoxelSizeDim2"] = float(voxel_sizes[1]) - metadata["VoxelSizeDim3"] = float(voxel_sizes[2]) - metadata["Dim1Size"] = matrix_dims[0] - metadata["Dim2Size"] = matrix_dims[1] - metadata["Dim3Size"] = matrix_dims[2] - if img.ndim == 4: - metadata["NumVolumes"] = matrix_dims[3] - elif img.ndim == 3: - metadata["NumVolumes"] = 1.0 - orient = nb.orientations.aff2axcodes(img.affine) - metadata["ImageOrientation"] = "".join(orient) + "+" diff --git a/qsirecon/utils/bspline.py b/qsirecon/utils/bspline.py deleted file mode 100644 index bb59c2e8..00000000 --- a/qsirecon/utils/bspline.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: - -import numpy as np -from nipype import logging - -LOGGER = logging.getLogger("nipype.interfaces") - - -def get_ijk(data, offset=0): - """ - Calculates voxel coordinates from data - """ - from numpy import mgrid - - if not isinstance(offset, (list, tuple)): - offset = [offset] * 3 - - grid = mgrid[ - offset[0] : (offset[0] + data.shape[0]), - offset[1] : (offset[1] + data.shape[1]), - offset[2] : (offset[2] + data.shape[2]), - ] - return grid.reshape(3, -1).T - - -def compute_affine(data, zooms): - """ - Compose a RAS affine mat, since the affine of the image might not be RAS - """ - aff = np.eye(4) * (list(zooms) + [1]) - aff[:3, 3] -= aff[:3, :3].dot(np.array(data.shape[:3], dtype=float) - 1.0) * 0.5 - return aff - - -def tbspl_eval(points, knots, zooms, njobs=None): - """ - Evaluate tensor product BSpline - """ - raise Exception("Removed BSpline") - - -def _evalp(args): - import numpy as np - from scipy.sparse import csr_matrix - - point, knots, vbspl, zooms = args - u_vec = (knots - point[np.newaxis, ...]) / zooms[np.newaxis, ...] - c = vbspl(u_vec.reshape(-1)).reshape((knots.shape[0], 3)).prod(axis=1) - return csr_matrix(c) diff --git a/qsirecon/utils/grouping.py b/qsirecon/utils/grouping.py deleted file mode 100644 index 604009c6..00000000 --- a/qsirecon/utils/grouping.py +++ /dev/null @@ -1,1220 +0,0 @@ -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Utilities to group scans based on their acquisition parameters -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Download many variations of fieldmaps and dwi data - -Examples --------- -Set up tests ->>> import os ->>> from qsirecon.utils.testing import get_grouping_test_data ->>> data_root = get_grouping_test_data() ->>> os.chdir(data_root) -""" -import logging -from collections import defaultdict - -from nipype.utils.filemanip import split_filename - -from .. import config -from ..interfaces.bids import get_bids_params - -LOGGER = logging.getLogger("nipype.workflow") - - -def group_dwi_scans( - subject_data, - using_fsl=False, - combine_scans=True, - ignore_fieldmaps=False, - concatenate_distortion_groups=False, -): - """Determine which scans can be concatenated based on their acquisition parameters. 
- - Parameters - ---------- - bids_layout : :obj:`pybids.BIDSLayout` - A PyBIDS layout - group_for_eddy : :obj:`bool` - Should a plus and minus series be grouped together for TOPUP/eddy? - combine_scans : :obj:`bool` - Should scan concatention happen? - concatenate_distortion_groups : :obj:`bool` - Will distortion groups get merged at the end of the pipeline? - - Returns - ------- - scan_groups : :obj:`list` of :obj:`dict` - A dict where the keys are the BIDS derivatives name of the output file after - concatenation. The values are lists of dwi files in that group. - """ - # Handle the grouping of multiple dwi files within a session - dwi_session_groups = get_session_groups(config.execution.layout, subject_data, combine_scans) - - # Group them by their warp group - dwi_fmap_groups = [] - for dwi_session_group in dwi_session_groups: - dwi_fmap_groups.extend( - group_by_warpspace(dwi_session_group, config.execution.layout, ignore_fieldmaps) - ) - - if using_fsl: - return group_for_eddy(dwi_fmap_groups) - - if concatenate_distortion_groups: - return dwi_fmap_groups, group_for_concatenation(dwi_fmap_groups) - - return dwi_fmap_groups, {} - - -def get_session_groups(layout, subject_data, combine_all_dwis): - """Handle the grouping of multiple dwi files within a session. - - Parameters - ---------- - layout : :obj:`pybids.BIDSLayout` - A PyBIDS layout - subject_data : :obj:`dict` - A dictionary of BIDS data for a single subject - combine_all_dwis : :obj:`bool` - If True, combine all dwi files within a session into a single group - - Returns - ------- - dwi_session_groups : :obj:`list` of :obj:`list` - A list of lists of dwi files. Each list of dwi files is a group of scans - that can be concatenated together. - """ - sessions = layout.get_sessions() if layout is not None else [] - all_dwis = subject_data["dwi"] - dwi_session_groups = [] - if not combine_all_dwis: - dwi_session_groups = [[dwi] for dwi in all_dwis] - - else: - if sessions: - LOGGER.info("Combining all dwi files within each available session:") - for session in sessions: - session_files = [img for img in all_dwis if "ses-" + session in img] - LOGGER.info("\t- %d scans in session %s", len(session_files), session) - dwi_session_groups.append(session_files) - else: - LOGGER.info("Combining all %d dwis within the single available session", len(all_dwis)) - dwi_session_groups = [all_dwis] - - return dwi_session_groups - - -FMAP_PRIORITY = { - "dwi": 0, - "epi": 1, - "fieldmap": 2, - "phasediff": 3, - "phase1": 4, - "phase": 4, - "syn": 5, -} - - -def get_highest_priority_fieldmap(fmap_infos): - """Return a dictionary describing the highest priority fieldmap. - - Parameters - ---------- - fmap_infos : :obj:`list` of :obj:`dict` - A list of dictionaries describing fieldmaps. Each dictionary must have a - ``suffix`` key and may have an ``epi`` key. - - Returns - ------- - selected_fmap_info : :obj:`dict` - The dictionary describing the highest priority fieldmap. - This will be the entry from ``fmap_infos`` with the highest priority value. - If no fieldmaps are found, the dictionary will have a ``suffix`` key with a - value of ``None``. 
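
    The selection loop below keeps the candidate whose suffix has the smallest
    ``FMAP_PRIORITY`` value, after collapsing multiple EPI entries into one.
    A minimal editorial sketch of just that comparison, assuming every
    candidate carries a recognized suffix:

    >>> candidates = [{"suffix": "phasediff"}, {"suffix": "epi"}]
    >>> min(candidates, key=lambda info: FMAP_PRIORITY[info["suffix"]])
    {'suffix': 'epi'}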
- - Examples - -------- - Invent some potential fieldmaps - >>> epi_fmap1 = {"epi": "/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz", "suffix": "epi"} - >>> epi_fmap2 = {"epi": "/data/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz", "suffix": "epi"} - >>> epi_fmap3 = {"epi": "/data/sub-1/fmap/sub-1_dir-PA_epi.nii.gz", "suffix": "epi"} - >>> - >>> phasediff_fmap = {"phasediff": "/data/sub-1/fmap/sub-1_phasediff.nii.gz", - ... "suffix": "phasediff"} - >>> phases_fmap = {"phase1": "/data/sub-1/fmap/sub-1_phase1.nii.gz", - ... "suffix": "phase1"} - >>> - >>> dwi_fmap1 = {"dwi": "/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz", "suffix": "dwi"} - >>> dwi_fmap2 = {'suffix': 'dwi', - ... 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz']} - - When there are no fieldmaps in ``fmaps/``, but a reverse PE DWI series - >>> get_highest_priority_fieldmap([dwi_fmap1]) - {'dwi': '/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz', 'suffix': 'dwi'} - - There is both an epi fieldmap and a phase1/phase2 GRE fieldmap - >>> get_highest_priority_fieldmap([epi_fmap1, phases_fmap]) - {'suffix': 'epi', 'epi': ['/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz']} - - Multiple EPI fieldmaps - >>> get_highest_priority_fieldmap( - ... [epi_fmap1, epi_fmap2, epi_fmap3]) # doctest: +NORMALIZE_WHITESPACE - {'suffix': 'epi', - 'epi': ['/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz', - '/data/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz', - '/data/sub-1/fmap/sub-1_dir-PA_epi.nii.gz']} - - An EPI fieldmap from ``fmap/`` should be chosen over a reverse PE DWI series - >>> get_highest_priority_fieldmap([epi_fmap1, dwi_fmap2]) - {'suffix': 'epi', 'epi': ['/data/sub-1/fmap/sub-1_dir-AP_run-1_epi.nii.gz']} - - """ - # Find fieldmaps - default_priority = max(FMAP_PRIORITY.values()) + 1 - priority = default_priority - selected_fmap_info = {"suffix": None} - - # collapse multiple EPI fieldmaps into one entry - epi_fmaps = sorted( - [fmap_info["epi"] for fmap_info in fmap_infos if fmap_info.get("suffix") == "epi"] - ) - if epi_fmaps: - epi_info = {"suffix": "epi", "epi": epi_fmaps} - fmap_infos = [ - fmap_info for fmap_info in fmap_infos if fmap_info.get("suffix") != "epi" - ] + [epi_info] - - # Select the highest priority fieldmap - for fmap_info in fmap_infos: - if fmap_info.get("suffix") == "phase": - fmap_info["suffix"] = "phase1" - - fmap_type = fmap_info.get("suffix") - if fmap_type not in FMAP_PRIORITY: - continue - - this_priority = FMAP_PRIORITY[fmap_type] - if this_priority < priority: - priority = this_priority - selected_fmap_info = fmap_info - - return selected_fmap_info - - -def find_fieldmaps_from_other_dwis(dwi_files, dwi_file_metadatas): - """Find a list of files in the dwi/ directory that can be used for distortion correction. - - It is common to acquire DWI scans with opposite phase encoding directions so they can be - used to correct each other's EPI distortion. There is currently no mechanism in BIDS to - specify whether b=0 scans in dwi/ can be used as fieldmaps for one another. - - Parameters - ---------- - dwi_files : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - dwi_file_metadatas : :obj:`list` of :obj:`dict` - A list of dictionaries containing metadata for each dwi file. - Each dictionary should have a ``PhaseEncodingDirection`` key. 
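
    The pairing rule in the implementation below simply flips the polarity of
    the BIDS phase-encoding code; a standalone miniature of that rule
    (editorial sketch, runnable on its own):

    >>> def opposite_pe(pe_dir):
    ...     return pe_dir[0] if pe_dir.endswith("-") else pe_dir + "-"
    >>> opposite_pe("j"), opposite_pe("j-")
    ('j-', 'j')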
- - Returns - ------- - dwi_series_fieldmaps : :obj:`dict` - A dictionary where the keys are the full paths to dwi files and the values are - dictionaries describing the fieldmap. If no fieldmap is found, the dictionary - will be empty. - - Examples - -------- - - A single scan with no opportunities to SDC with a DWI scan - >>> from qsirecon.utils.grouping import find_fieldmaps_from_other_dwis - >>> single_dwi_file = ["/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz"] - >>> single_dwi_file_metadatas = [{"PhaseEncodingDirection": "j"}] - >>> find_fieldmaps_from_other_dwis(single_dwi_file, single_dwi_file_metadatas) - {'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz': {}} - - Two scans with the same PE direction: again no opportunities to SDC - >>> repeat_dwi_files = ["/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz"] - >>> repeat_dwi_file_metadatas = [{"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j"}] - >>> find_fieldmaps_from_other_dwis(repeat_dwi_files, - ... repeat_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz': {}, - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz': {}} - - Paired scans, each in opposite PE directions - >>> paired_dwi_files = [ - ... "/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz"] - >>> paired_dwi_file_metadatas = [ - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j-"}] - >>> find_fieldmaps_from_other_dwis(paired_dwi_files, - ... paired_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz']}} - - Multiple scans in multiple PE directions - >>> multi_dwi_files = [ - ... "/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz"] - >>> multi_dwi_file_metadatas = [ - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j"}, - ... {"PhaseEncodingDirection": "j-"}, - ... {"PhaseEncodingDirection": "j-"}] - >>> find_fieldmaps_from_other_dwis(multi_dwi_files, - ... 
multi_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz']}, - '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz': {'suffix': 'dwi', - 'dwi': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz']}} - - No information available - >>> empty_dwi_files = [ - ... "/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz", - ... "/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz"] - >>> empty_dwi_file_metadatas = [ - ... {}, - ... {}] - >>> find_fieldmaps_from_other_dwis(empty_dwi_files, - ... empty_dwi_file_metadatas) # doctest: +NORMALIZE_WHITESPACE - {'/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz': {}, - '/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz': {}} - """ - - scans_to_pe_dirs = { - fname: meta.get("PhaseEncodingDirection", "None") - for fname, meta in zip(dwi_files, dwi_file_metadatas) - } - pe_dirs_to_scans = defaultdict(list) - for scan_name, scan_dir in scans_to_pe_dirs.items(): - pe_dirs_to_scans[scan_dir].append(scan_name) - - dwi_series_fieldmaps = {} - for dwi_file in dwi_files: - dwi_series_fieldmaps[dwi_file] = {} - pe_dir = scans_to_pe_dirs[dwi_file] - # if there is no information, don't assume it's ok to combine - if pe_dir is None: - continue - - opposite_pe = pe_dir[0] if pe_dir.endswith("-") else pe_dir + "-" - rpe_dwis = pe_dirs_to_scans[opposite_pe] - - if rpe_dwis: - dwi_series_fieldmaps[dwi_file] = {"suffix": "dwi", "dwi": sorted(rpe_dwis)} - - return dwi_series_fieldmaps - - -def split_by_phase_encoding_direction(dwi_files, metadatas): - """If no fieldmaps have been found for a group of dwi files, split them by PE direction. - - Parameters - ---------- - dwi_files : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - metadatas : :obj:`list` of :obj:`dict` - A list of dictionaries containing metadata for each dwi file. - The only field that is used i "PhaseEncodingDirection". - - Returns - ------- - dwi_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. Each dictionary - has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. - If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - Examples - -------- - - One of each direction (Not likely to see in the wild) - >>> dwi_files = [ - ... '/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz', - ... 
'/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-LR_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-IS_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-SI_dwi.nii.gz' - ... ] - >>> metadatas = [ - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j-'}, - ... {'PhaseEncodingDirection': 'i'}, - ... {'PhaseEncodingDirection': 'i-'}, - ... {'PhaseEncodingDirection': 'k'}, - ... {'PhaseEncodingDirection': 'k-'} - ... ] - >>> split_by_phase_encoding_direction(dwi_files, metadatas) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'i', - 'concatenated_bids_name': 'sub-1_dir-RL'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-LR_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'i-', - 'concatenated_bids_name': 'sub-1_dir-LR'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-IS_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'k', - 'concatenated_bids_name': 'sub-1_dir-IS'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-SI_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'k-', - 'concatenated_bids_name': 'sub-1_dir-SI'}] - - Repeats of some: - >>> dwi_files = [ - ... '/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz' - ... ] - >>> metadatas = [ - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j-'}, - ... {'PhaseEncodingDirection': 'i'} - ... ] - >>> split_by_phase_encoding_direction(dwi_files, metadatas) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['/data/sub-1/dwi/sub-1_dir-RL_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'i', - 'concatenated_bids_name': 'sub-1_dir-RL'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - - Some missing metadata - >>> dwi_files = [ - ... '/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-3_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-4_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_run-5_dwi.nii.gz' - ... ] - >>> metadatas = [ - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j'}, - ... {'PhaseEncodingDirection': 'j-'}, - ... {}, - ... {} - ... 
] - >>> split_by_phase_encoding_direction(dwi_files, metadatas) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['/data/sub-1/dwi/sub-1_run-1_dwi.nii.gz', - '/data/sub-1/dwi/sub-1_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_run-3_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_run-3'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_run-4_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-4'}, - {'dwi_series': ['/data/sub-1/dwi/sub-1_run-5_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-5'}] - """ - pe_dir_groups = defaultdict(list) - unknowns = [] - for dwi_file, meta in zip(dwi_files, metadatas): - pe_dir = meta.get("PhaseEncodingDirection") - if pe_dir: - pe_dir_groups[pe_dir].append(dwi_file) - else: - unknowns.append(dwi_file) - - dwi_groups = [] - for pe_dir, dwi_group in sorted(pe_dir_groups.items()): - dwi_groups.append( - { - "dwi_series": dwi_group, - "fieldmap_info": {"suffix": None}, - "dwi_series_pedir": pe_dir, - "concatenated_bids_name": get_concatenated_bids_name(dwi_group), - } - ) - for unknown in unknowns: - dwi_groups.append( - { - "dwi_series": [unknown], - "fieldmap_info": {"suffix": None}, - "dwi_series_pedir": "", - "concatenated_bids_name": get_concatenated_bids_name([unknown]), - } - ) - - return dwi_groups - - -def group_by_warpspace(dwi_files, layout, ignore_fieldmaps): - """Groups a session's DWI files by their acquisition parameters. - - DWIs are grouped by their **warped space**. Two DWI series that are - listed in the IntendedFor field of a fieldmap are assumed to have the same - susceptibility distortions and therefore be in the same warped space. The goal - of this function is to combine DWI series into groups of acquisitions that - are in the same warped space into a list of scans that can be combined after - unwarping. - - Parameters - ---------- - dwi_files : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree - layout : :obj:`pybids.BIDSLayout` - A representation of the BIDS tree - ignore_fieldmaps : :obj:`bool` - If True, ignore any fieldmaps in the ``fmap/`` directory. Images in - ``dwi/`` will still be considered for SDC. - - Returns - ------- - dwi_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. Each dictionary - has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. - If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - Examples - -------- - - Set up tests - >>> from qsirecon.utils.bids import collect_data - >>> SUBJECT_ID = "1" - - No fieldmap data, a single DWI series - >>> subject_data, layout = collect_data("easy", SUBJECT_ID) - >>> group_by_warpspace( - ... 
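A condensed model of the splitting logic above: files with a known direction are bucketed and emitted in sorted order, while files with no usable metadata each become a singleton group. The inputs below are toy values, not data from this repository::

    from collections import defaultdict

    files = ["r1.nii.gz", "r2.nii.gz", "r3.nii.gz", "r4.nii.gz"]
    metas = [{"PhaseEncodingDirection": "j"},
             {"PhaseEncodingDirection": "j"},
             {"PhaseEncodingDirection": "j-"},
             {}]

    groups, unknowns = defaultdict(list), []
    for fname, meta in zip(files, metas):
        pe = meta.get("PhaseEncodingDirection")
        (groups[pe] if pe else unknowns).append(fname)

    # known directions come out sorted; unknown files become singleton groups
    ordered = [{"dwi_series": g, "dwi_series_pedir": pe}
               for pe, g in sorted(groups.items())]
    ordered += [{"dwi_series": [u], "dwi_series_pedir": ""} for u in unknowns]
    print(ordered)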
subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['...sub-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1'}] - - Two DWIs with the same PE direction, to be concatenated - >>> subject_data, layout = collect_data("concat1", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../concat1/sub-1/dwi/sub-1_run-01_dwi.nii.gz', - '.../concat1/sub-1/dwi/sub-1_run-02_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1'}] - - Two DWI series intended to SDC each other - >>> subject_data, layout = collect_data("opposite", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - - Multiple DWI series in two different PE directions - >>> subject_data, layout = collect_data("opposite_concat", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - - A phasediff fieldmap defines the warped group - >>> subject_data, layout = collect_data("phasediff", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../phasediff/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../phasediff/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'phasediff': '.../phasediff/sub-1/fmap/sub-1_phasediff.nii.gz', - 'magnitude1': '.../magnitude1/sub-1/fmap/sub-1_magnitude1.nii.gz', - 'suffix': 'phasediff'}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}] - - Two DWI series, each with its own fieldmap/warped space - >>> subject_data, layout = collect_data("separate_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... 
subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../separate_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../separate_fmaps/sub-1/fmap/sub-1_dir-PA_run-2_epi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-2'}] - - Same as above but ignoring fieldmaps. Data gets concatenated - >>> subject_data, layout = collect_data("separate_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, True) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../separate_fmaps/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}] - - Two DWI series, opposite PE directions, dedicated EPI fieldmap for each - >>> subject_data, layout = collect_data("mixed_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'epi', - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - - Same as last one, but ignore fieldmaps. The DWI series will be used for SDC instead - >>> subject_data, layout = collect_data("mixed_fmaps", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, True) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - - - There is no metadata related to epi distortion: don't concatenate anything - >>> subject_data, layout = collect_data("missing_info", SUBJECT_ID) - >>> group_by_warpspace( - ... subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../missing_info/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - {'dwi_series': ['.../missing_info/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - - A bizarre mix of PE directions and some missing data - >>> subject_data, layout = collect_data("wtf", SUBJECT_ID) - >>> group_by_warpspace( - ... 
subject_data['dwi'], layout, False) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS - [{'dwi_series': ['.../wtf/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../wtf/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_dir-IS_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': 'k-', - 'concatenated_bids_name': 'sub-1_dir-IS'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_run-1_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-1'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': None}, - 'dwi_series_pedir': '', - 'concatenated_bids_name': 'sub-1_run-2'}, - {'dwi_series': ['.../wtf/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'fieldmap_info': {'suffix': 'dwi', - 'dwi': ['.../wtf/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../wtf/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz']}, - 'dwi_series_pedir': 'j-', - 'concatenated_bids_name': 'sub-1_dir-PA'}] - """ - # For doc-building - if layout is None: - LOGGER.warning("Assuming we're building docs") - return [ - { - "dwi_series": dwi_files, - "fieldmap_info": {"suffix": None}, - "dwi_series_pedir": "j", - "concatenated_bids_name": "sub-1", - } - ] - - # Get the metadata from every dwi file - dwi_metadatas = [layout.get_metadata(dwi_file) for dwi_file in dwi_files] - # Check for any data in dwi/ that could be used for distortion correction - dwi_series_fieldmaps = find_fieldmaps_from_other_dwis(dwi_files, dwi_metadatas) - - # Find the best fieldmap for each file. - best_fieldmap = {} - grouped_by_fmap = defaultdict(list) - for dwi_file in dwi_files: - all_fmaps = [dwi_series_fieldmaps[dwi_file]] - if not ignore_fieldmaps: - fmap_fmaps = layout.get_fieldmap(dwi_file, return_list=True) - all_fmaps += fmap_fmaps - - # Find the highest priority fieldmap for this dwi file - best_fmap = get_highest_priority_fieldmap(all_fmaps) - best_fieldmap[dwi_file] = best_fmap - - # Add the dwi file to a list of those corrected by this fieldmap - fmap_key = tuple(best_fmap[best_fmap["suffix"]]) if best_fmap["suffix"] else "None" - grouped_by_fmap[fmap_key].append(dwi_file) - - # Create the final groups - dwi_groups = [] - for fmap_key, dwi_group in grouped_by_fmap.items(): - if fmap_key == "None": - dwi_groups.extend( - split_by_phase_encoding_direction( - dwi_group, [layout.get_metadata(dwi_file) for dwi_file in dwi_group] - ) - ) - else: - example_dwi_file = dwi_group[0] - pe_direction = layout.get_metadata(example_dwi_file).get("PhaseEncodingDirection") - dwi_groups.append( - { - "dwi_series": dwi_group, - "fieldmap_info": best_fieldmap[example_dwi_file], - "dwi_series_pedir": pe_direction, - "concatenated_bids_name": get_concatenated_bids_name(dwi_group), - } - ) - - return dwi_groups - - -def merge_dwi_groups(dwi_groups_plus, dwi_groups_minus): - """Convert two dwi groups into a single group that will be concatenated for FSL. - - Parameters - ---------- - dwi_groups_plus : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - Each dictionary has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. 
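One implementation detail worth noting in ``group_by_warpspace`` above: a Python ``list`` is unhashable, so the best fieldmap's file list is converted to a ``tuple`` before being used as a grouping key, with the string ``"None"`` standing in for the no-fieldmap case. A stripped-down illustration with made-up file names::

    from collections import defaultdict

    best = {
        "a.nii.gz": {"suffix": "epi", "epi": ["fmap1.nii.gz"]},
        "b.nii.gz": {"suffix": "epi", "epi": ["fmap1.nii.gz"]},
        "c.nii.gz": {"suffix": None},
    }
    grouped = defaultdict(list)
    for dwi, fmap in best.items():
        # tuples are hashable; "None" marks files with no usable fieldmap
        key = tuple(fmap[fmap["suffix"]]) if fmap["suffix"] else "None"
        grouped[key].append(dwi)
    print(dict(grouped))
    # {('fmap1.nii.gz',): ['a.nii.gz', 'b.nii.gz'], 'None': ['c.nii.gz']}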
- If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - dwi_groups_minus : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - Each dictionary has the same keys as ``dwi_groups_plus``. - - Returns - ------- - merged_group : :obj:`dict` - A dictionary describing the merged group of dwi files. - The dictionary has the same keys as ``dwi_groups_plus``. - - Examples - -------- - - Set up tests - >>> SUBJECT_ID = "1" - - AP/PA fieldmaps and paired DWI series - >>> plus_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}] - >>> minus_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - >>> merge_dwi_groups(plus_groups, minus_groups) # doctest: +NORMALIZE_WHITESPACE - {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz', - '.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'} - - Two series SDC each other - >>> plus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP'}] - >>> minus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA'}] - >>> merge_dwi_groups(plus_groups, minus_groups) # doctest: +NORMALIZE_WHITESPACE - {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'} - - An odd case: one has an EPI - >>> plus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP'}] - >>> minus_groups = [ - ... {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 
'concatenated_bids_name': 'sub-1_dir-PA'}] - >>> merge_dwi_groups(plus_groups, minus_groups) # doctest: +NORMALIZE_WHITESPACE - {'dwi_series': ['.../opposite/sub-1/dwi/sub-1_dir-AP_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../opposite/sub-1/dwi/sub-1_dir-PA_dwi.nii.gz'], - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'} - - """ - dwi_files = [] - rpe_files = [] - fmap_files = [] - - for dwi_group in dwi_groups_plus: - dwi_files += dwi_group["dwi_series"] - fmap_type = dwi_group["fieldmap_info"].get("suffix") - if fmap_type == "dwi": - rpe_files += dwi_group["fieldmap_info"]["dwi"] - elif fmap_type == "epi": - fmap_files += dwi_group["fieldmap_info"]["epi"] - pe_dir = dwi_group["dwi_series_pedir"] - - for dwi_group in dwi_groups_minus: - rpe_files += dwi_group["dwi_series"] - fmap_type = dwi_group["fieldmap_info"].get("suffix") - if fmap_type == "dwi": - dwi_files += dwi_group["fieldmap_info"]["dwi"] - elif fmap_type == "epi": - fmap_files += dwi_group["fieldmap_info"]["epi"] - - dwi_files = sorted(set(dwi_files)) - rpe_files = sorted(set(rpe_files)) - fmap_files = sorted(set(fmap_files)) - fieldmap_info = {"suffix": "rpe_series", "rpe_series": rpe_files} - if fmap_files: - fieldmap_info["epi"] = fmap_files - - merged_group = { - "dwi_series": dwi_files, - "dwi_series_pedir": pe_dir, - "fieldmap_info": fieldmap_info, - "concatenated_bids_name": get_concatenated_bids_name(dwi_files + rpe_files), - } - return merged_group - - -def group_for_eddy(all_dwi_fmap_groups): - """Find matched pairs of phase encoding directions that can be combined for TOPUP/eddy. - - Any groups that don't have a phase encoding direction won't be correctable by eddy/TOPUP. - - Parameters - ---------- - all_dwi_fmap_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - - Returns - ------- - eddy_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - Each dictionary has the following keys: - - - ``dwi_series``: A list of full paths to dwi nifti files in a BIDS tree. - - ``fieldmap_info``: A dictionary describing the fieldmap. - If no fieldmap is found, the dictionary will be empty. - - ``dwi_series_pedir``: The phase encoding direction of the dwi series. - If no information is available, the value will be an empty string. - - ``concatenated_bids_name``: The BIDS name of the concatenated dwi series. - If no information is available, the value will be an empty string. - - Examples - -------- - - Paired DWI series to correct each other: - >>> dwi_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 
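The merge in ``merge_dwi_groups`` above is mostly set arithmetic: plus-direction series (plus any ``dwi`` fieldmap entries attached to minus groups) form ``dwi_series``, the opposite files form ``rpe_series``, and EPI fieldmaps from both sides are pooled and deduplicated. A toy rendering of that bookkeeping, with illustrative names::

    plus = {"dwi_series": ["ap.nii.gz"],
            "fieldmap_info": {"suffix": "epi", "epi": ["pa_epi.nii.gz"]}}
    minus = {"dwi_series": ["pa.nii.gz"],
             "fieldmap_info": {"suffix": "dwi", "dwi": ["ap.nii.gz"]}}

    dwi = sorted(set(plus["dwi_series"] + minus["fieldmap_info"].get("dwi", [])))
    rpe = sorted(set(minus["dwi_series"] + plus["fieldmap_info"].get("dwi", [])))
    epi = sorted(set(plus["fieldmap_info"].get("epi", [])
                     + minus["fieldmap_info"].get("epi", [])))
    fieldmap = {"suffix": "rpe_series", "rpe_series": rpe}
    if epi:
        fieldmap["epi"] = epi
    print(dwi, fieldmap)
    # ['ap.nii.gz'] {'suffix': 'rpe_series', 'rpe_series': ['pa.nii.gz'],
    #  'epi': ['pa_epi.nii.gz']}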
'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'}] - - AP/PA EPI fieldmaps - >>> dwi_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP_run-1'}, - ... {'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'epi', - ... 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA_run-2'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../mixed_fmaps/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - 'epi': ['.../mixed_fmaps/sub-1/fmap/sub-1_dir-AP_run-2_epi.nii.gz', - '.../mixed_fmaps/sub-1/fmap/sub-1_dir-PA_run-1_epi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'}] - - Repeated scans per PE direction - >>> dwi_groups = [ - ... {'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_dir-AP'}, - ... {'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_dir-PA'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'dwi_series_pedir': 'j', - 'fieldmap_info': {'suffix': 'rpe_series', - 'rpe_series': ['.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - '.../opposite_concat/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz']}, - 'concatenated_bids_name': 'sub-1'}] - - A phasediff fieldmap (Not used by eddy) - >>> dwi_groups = [ - ... {'dwi_series': ['.../phasediff/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '.../phasediff/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'phasediff': '.../phasediff/sub-1/fmap/sub-1_phasediff.nii.gz', - ... 'magnitude1': '.../magnitude1/sub-1/fmap/sub-1_magnitude1.nii.gz', - ... 'suffix': 'phasediff'}, - ... 'dwi_series_pedir': 'j', - ... 
'concatenated_bids_name': 'sub-1_dir-AP'}] - >>> group_for_eddy(dwi_groups) # doctest: +NORMALIZE_WHITESPACE - [{'dwi_series': ['.../phasediff/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - '.../phasediff/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz'], - 'fieldmap_info': {'phasediff': '.../phasediff/sub-1/fmap/sub-1_phasediff.nii.gz', - 'magnitude1': '.../magnitude1/sub-1/fmap/sub-1_magnitude1.nii.gz', - 'suffix': 'phasediff'}, - 'dwi_series_pedir': 'j', - 'concatenated_bids_name': 'sub-1_dir-AP'}] - - """ - eddy_dwi_groups = [] - eddy_compatible_suffixes = ("dwi", "epi") - session_groups = _group_by_sessions(all_dwi_fmap_groups) - for _, dwi_fmap_groups in session_groups.items(): - for pe_dir in "ijk": - plus_series = [ - dwi_group - for dwi_group in dwi_fmap_groups - if dwi_group.get("dwi_series_pedir") == pe_dir - and dwi_group["fieldmap_info"].get("suffix") in eddy_compatible_suffixes - ] - minus_series = [ - dwi_group - for dwi_group in dwi_fmap_groups - if dwi_group.get("dwi_series_pedir") == pe_dir + "-" - and dwi_group["fieldmap_info"].get("suffix") in eddy_compatible_suffixes - ] - - # Can these be grouped? - if plus_series and minus_series: - eddy_dwi_groups.append(merge_dwi_groups(plus_series, minus_series)) - else: - eddy_dwi_groups.extend(plus_series + minus_series) - - # Add separate groups for non-compatible fieldmaps - for dwi_group in dwi_fmap_groups: - if dwi_group["fieldmap_info"].get("suffix") not in eddy_compatible_suffixes: - eddy_dwi_groups.append(dwi_group) - - return eddy_dwi_groups, { - group["concatenated_bids_name"]: group["concatenated_bids_name"] - for group in eddy_dwi_groups - } - - -def group_for_concatenation(all_dwi_fmap_groups): - """Find matched pairs of phase encoding directions that can be combined after SHORELine. - - Any groups that don't have a phase encoding direction won't be correctable by SHORELine. - - Parameters - ---------- - all_dwi_fmap_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - - Returns - ------- - concatenation_grouping : :obj:`dict` - A dictionary mapping the concatenated BIDS name of each group to the name of the - group that it should be concatenated with. - """ - concatenation_grouping = {} - session_groups = _group_by_sessions(all_dwi_fmap_groups) - for _, dwi_fmap_groups in session_groups.items(): - all_images = [] - for group in dwi_fmap_groups: - all_images.extend(group["dwi_series"]) - group_name = get_concatenated_bids_name(all_images) - # Add separate groups for non-compatible fieldmaps - for group in dwi_fmap_groups: - concatenation_grouping[group["concatenated_bids_name"]] = group_name - - return concatenation_grouping - - -def get_concatenated_bids_name(dwi_group): - """Derive the output file name for a group of dwi files. - - Strip away non-shared key/values from the input list of files. This function - assumes you have already split the dwi group into something meaningful and - really want to combine all the inputs. - - Parameters - ---------- - dwi_group : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - - Returns - ------- - fname : :obj:`str` - The BIDS name of the concatenated dwi series. - - Examples - -------- - >>> get_concatenated_bids_name([ - ... '/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-3_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-4_dwi.nii.gz' - ... ]) - 'sub-1_dir-AP' - - >>> get_concatenated_bids_name([ - ... 
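The pairing loop in ``group_for_eddy`` above reduces to a small pattern: for each axis code, collect the eddy-compatible groups whose PE direction is the bare axis versus the axis plus ``-``, merge only when both sides exist, and pass everything else through untouched. In sketch form, with a placeholder standing in for ``merge_dwi_groups``::

    groups = [
        {"dwi_series_pedir": "j", "fieldmap_info": {"suffix": "dwi"}},
        {"dwi_series_pedir": "j-", "fieldmap_info": {"suffix": "dwi"}},
        {"dwi_series_pedir": "j", "fieldmap_info": {"suffix": "phasediff"}},
    ]
    compatible = ("dwi", "epi")
    paired = []
    for axis in "ijk":
        plus = [g for g in groups
                if g["dwi_series_pedir"] == axis
                and g["fieldmap_info"]["suffix"] in compatible]
        minus = [g for g in groups
                 if g["dwi_series_pedir"] == axis + "-"
                 and g["fieldmap_info"]["suffix"] in compatible]
        if plus and minus:
            paired.append(("merged", plus, minus))  # merge_dwi_groups stand-in
        else:
            paired.extend(plus + minus)
    # incompatible fieldmaps (e.g. phasediff) pass through unmerged
    paired.extend(g for g in groups
                  if g["fieldmap_info"]["suffix"] not in compatible)
    print(len(paired))  # 2: one merged pair plus the phasediff group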
'/data/sub-1/dwi/sub-1_dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_dir-PA_run-2_dwi.nii.gz' - ... ]) - 'sub-1' - - - >>> get_concatenated_bids_name([ - ... '/data/sub-1/dwi/sub-1_acq-HCP-dir-AP_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_acq-HCP_dir-AP_run-2_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_acq-HCP_dir-PA_run-1_dwi.nii.gz', - ... '/data/sub-1/dwi/sub-1_acq-HCP_dir-PA_run-2_dwi.nii.gz' - ... ]) - 'sub-1_acq-HCP' - - """ - # If a single file, use its name, otherwise use the common prefix - if len(dwi_group) > 1: - fname = _get_common_bids_fields(dwi_group) - parts = fname.split("_") - full_parts = [part for part in parts if not part.endswith("-")] - fname = "_".join(full_parts) - else: - input_fname = dwi_group[0] - fname = split_filename(input_fname)[1] - - if fname.endswith("_dwi"): - fname = fname[:-4] - - return fname.replace(".", "").replace(" ", "") - - -def _get_common_bids_fields(fnames): - """Find the common fields in a list of BIDS filenames. - - Parameters - ---------- - fnames : :obj:`list` of :obj:`str` - A list of full paths to dwi nifti files in a BIDS tree. - - Returns - ------- - fname : :obj:`str` - The common fields in the filenames. - """ - bids_keys = defaultdict(set) - for fname in fnames: - basename = split_filename(fname)[1] - for token in basename.split("_"): - parts = token.split("-") - if len(parts) == 2: - key, value = parts - bids_keys[key].update((value,)) - - # Find all the keys with a single unique value - common_bids = [] - for key in ["sub", "ses", "acq", "dir", "run"]: - if len(bids_keys[key]) == 1: - common_bids.append(key + "-" + bids_keys[key].pop()) - - return "_".join(common_bids) - - -def _group_by_sessions(dwi_fmap_groups): - """Create a lookup of distortion groups by session - - Parameters - ---------- - dwi_fmap_groups : :obj:`list` of :obj:`dict` - A list of dictionaries describing each group of dwi files. - - Returns - ------- - ses_lookup : :obj:`dict` - A dictionary mapping session ids to lists of dwi_fmap_groups. - - Examples - -------- - Paired DWI series to correct each other: - >>> dwi_groups = [ - ... {'dwi_series': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-AP_run-1_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_ses-1_dir-AP_run-1'}, - ... {'dwi_series': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-PA_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../mixed_fmaps/sub-1/ses-1/dwi/sub-1_ses-1_dir-AP_run-1_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_ses-1_dir-PA_run-2'}, - ... {'dwi_series': [ - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2/dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2/dir-AP_run-2_dwi.nii.gz'], - ... 'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j', - ... 'concatenated_bids_name': 'sub-1_ses-2_dir-AP'}, - ... {'dwi_series': [ - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-PA_run-2_dwi.nii.gz'], - ... 
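The name-derivation helpers above amount to: parse each basename into ``key-value`` entities, keep only keys whose value is identical across all files, and reassemble in the fixed ``sub``/``ses``/``acq``/``dir``/``run`` order. A compact standalone version; ``common_bids_name`` is an illustrative name, not a function in this codebase::

    from collections import defaultdict

    def common_bids_name(basenames):
        entities = defaultdict(set)
        for base in basenames:
            for token in base.split("_"):
                parts = token.split("-")
                if len(parts) == 2:  # skip malformed tokens, as above
                    entities[parts[0]].add(parts[1])
        keep = [key + "-" + entities[key].pop()
                for key in ("sub", "ses", "acq", "dir", "run")
                if len(entities[key]) == 1]
        return "_".join(keep)

    print(common_bids_name(["sub-1_dir-AP_run-1_dwi", "sub-1_dir-AP_run-2_dwi"]))
    # prints: sub-1_dir-AP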
'fieldmap_info': {'suffix': 'dwi', - ... 'dwi': ['.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-AP_run-1_dwi.nii.gz', - ... '.../opposite_concat/sub-1/ses-2/dwi/sub-1_ses-2_dir-AP_run-2_dwi.nii.gz']}, - ... 'dwi_series_pedir': 'j-', - ... 'concatenated_bids_name': 'sub-1_ses-2_dir-PA'}] - """ - ses_lookup = defaultdict(list) - for group in dwi_fmap_groups: - bids_info = get_bids_params(group["concatenated_bids_name"]) - ses_lookup[bids_info["session_id"]].append(group) - - return ses_lookup diff --git a/qsirecon/utils/ingress.py b/qsirecon/utils/ingress.py index 2bf58c6a..0a1ba58e 100644 --- a/qsirecon/utils/ingress.py +++ b/qsirecon/utils/ingress.py @@ -42,59 +42,6 @@ def missing_from_ukb_directory(ukb_subject_dir): return [str(fpath) for fpath in required_files if not fpath.exists()] -def find_ukb_directory(ukb_directory_list, subject_id): - """Find a UKB directory for a given subject ID. - - Parameters - ---------- - ukb_directory_list : :obj:`list` of :obj:`pathlib.Path` - A list of ukb directories to search. - subject_id : :obj:`str` - The subject ID to search for. - - Returns - ------- - ukb_directory : :obj:`pathlib.Path` - The path to the ukb directory. - """ - potential_directories = [ - subdir for subdir in ukb_directory_list if subdir.name.startswith(subject_id) - ] - - # If nothing starts with the subject id, then we're out of luck - if not potential_directories: - raise Exception(f"No UKB directory available for {subject_id}") - - complete_dirs = [] - for potential_directory in potential_directories: - missing_files = missing_from_ukb_directory(potential_directory) - if not missing_files: - complete_dirs.append(potential_directory) - - # Too many complete matches: ambiguous subject ID - if len(complete_dirs) > 1: - raise Exception( - "Provide a more specific subject filter: More than 1 directories match " - + subject_id - + "\n" - + "\n".join(map(str, complete_dirs)) - ) - - # There were potential directories, but none were complete - if not complete_dirs: - error_report = "\n".join( - [ - str(pdir.absolute()) - + " missing:\n " - + "\n ".join(missing_from_ukb_directory(pdir)) - for pdir in potential_directories - ] - ) - raise Exception(f"No complete directories found for {subject_id}:\n{error_report}") - - return - - def create_ukb_layout(ukb_dir, participant_label=None): """Find all valid ukb directories under ukb_dir. diff --git a/qsirecon/utils/misc.py b/qsirecon/utils/misc.py index 72ef65b9..5781bd78 100644 --- a/qsirecon/utils/misc.py +++ b/qsirecon/utils/misc.py @@ -13,60 +13,3 @@ def check_deps(workflow): for node in workflow._get_all_nodes() if (hasattr(node.interface, "_cmd") and which(node.interface._cmd.split()[0]) is None) ) - - -def fix_multi_T1w_source_name(in_files): - """Make up a generic source name when there are multiple T1s. - - >>> fix_multi_T1w_source_name([ - ... '/path/to/sub-045_ses-test_T1w.nii.gz', - ... '/path/to/sub-045_ses-retest_T1w.nii.gz']) - '/path/to/sub-045_T1w.nii.gz' - """ - import os - - from nipype.utils.filemanip import filename_to_list - - base, in_file = os.path.split(filename_to_list(in_files)[0]) - subject_label = in_file.split("_", 1)[0].split("-")[1] - return os.path.join(base, f"sub-{subject_label}_T1w.nii.gz") - - -def fix_multi_source_name(in_files, dwi_only, anatomical_contrast="T1w"): - """Make up a generic source name when there are multiple source files. - - >>> fix_multi_source_name([ - ... '/path/to/sub-045_ses-test_T1w.nii.gz', - ... 
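The deleted ``find_ukb_directory`` follows a validate-then-disambiguate pattern: filter candidate directories by subject prefix, keep the ones that pass a completeness check, and fail loudly on zero or multiple survivors. The same skeleton in general form; ``pick_unique_dir`` and ``is_complete`` are illustrative stand-ins (the latter for ``missing_from_ukb_directory``)::

    from pathlib import Path

    def pick_unique_dir(candidates, prefix, is_complete):
        matches = [d for d in candidates if d.name.startswith(prefix)]
        if not matches:
            raise Exception(f"No directory available for {prefix}")
        complete = [d for d in matches if is_complete(d)]
        if len(complete) > 1:
            raise Exception(f"Ambiguous prefix {prefix}: {complete}")
        if not complete:
            raise Exception(f"No complete directories found for {prefix}")
        return complete[0]

    # Example: every candidate passes the completeness check.
    print(pick_unique_dir([Path("/tmp/1234_2_0")], "1234", lambda d: True))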
'/path/to/sub-045_ses-retest_T1w.nii.gz'])
-    '/path/to/sub-045_T1w.nii.gz'
-    """
-    import os
-
-    from nipype.utils.filemanip import filename_to_list
-
-    base, in_file = os.path.split(filename_to_list(in_files)[0])
-    subject_label = in_file.split("_", 1)[0].split("-")[1]
-    if dwi_only:
-        anatomical_contrast = "dwi"
-        base = base.replace("/dwi", "/anat")
-
-    return os.path.join(base, f"sub-{subject_label}_{anatomical_contrast}.nii.gz")
-
-
-def add_suffix(in_files, suffix):
-    """Wrap nipype's fname_presuffix to conveniently just add a suffix.
-
-    >>> add_suffix([
-    ...     '/path/to/sub-045_ses-test_T1w.nii.gz',
-    ...     '/path/to/sub-045_ses-retest_T1w.nii.gz'], '_test')
-    'sub-045_ses-test_T1w_test.nii.gz'
-    """
-    import os.path as op
-
-    from nipype.utils.filemanip import filename_to_list, fname_presuffix
-
-    return op.basename(fname_presuffix(filename_to_list(in_files)[0], suffix=suffix))
-
-
-if __name__ == "__main__":
-    pass
diff --git a/qsirecon/utils/testing.py b/qsirecon/utils/testing.py
index ec745654..b59c3b35 100644
--- a/qsirecon/utils/testing.py
+++ b/qsirecon/utils/testing.py
@@ -8,11 +8,8 @@
 """
-import json
 import logging
-import tempfile
 import unittest
-from pathlib import Path
 
 from networkx.exception import NetworkXUnfeasible
 from nipype.interfaces import utility as niu
@@ -106,205 +103,3 @@ def assert_inputs_set(self, workflow, additional_inputs={}):
                 with self.assertRaises(Exception):
                     # throws an error if the input is already connected
                     workflow.connect([(dummy_node, node, [("dummy", field)])])
-
-
-def get_grouping_test_data():
-    """Write a number of grouping test datasets to base_path."""
-
-    dataset_description = {
-        "Acknowledgements": "",
-        "Authors": [],
-        "BIDSVersion": "1.0.2",
-        "DatasetDOI": "",
-        "Funding": "",
-        "HowToAcknowledge": "",
-        "License": "",
-        "Name": "test_data",
-        "ReferencesAndLinks": [],
-        "template": "project",
-    }
-
-    base_dir = tempfile.mkdtemp()
-    empty_bids_dir = Path(base_dir) / "empty_bids"
-    empty_bids_dir.mkdir(parents=True, exist_ok=True)
-
-    def write_json(pth, content):
-        with pth.open("w") as f:
-            json.dump(content, f)
-
-    def make_empty_bids(root, project_name):
-        project_root = root / project_name
-        project_root.mkdir(parents=True, exist_ok=True)
-        (project_root / "README").touch()
-        write_json(project_root / "dataset_description.json", dataset_description)
-        (project_root / "sub-1" / "dwi").mkdir(parents=True, exist_ok=True)
-        (project_root / "sub-1" / "fmap").mkdir(parents=True, exist_ok=True)
-        (project_root / "sub-1" / "anat").mkdir(parents=True, exist_ok=True)
-        return project_root / "sub-1"
-
-    def write_test_bids(name, files_and_metas):
-        test_bids = make_empty_bids(empty_bids_dir, name)
-        for fname, meta in files_and_metas:
-            _nifti = fname + ".nii.gz"
-            _json = fname + ".json"
-            (test_bids / _nifti).touch()
-            write_json(test_bids / _json, meta)
-        return test_bids.parent
-
-    # One dwi, no fmaps
-    write_test_bids("easy", [("dwi/sub-1_dwi", {"PhaseEncodingDirection": "j"})])
-
-    write_test_bids(
-        "concat1",
-        [
-            ("dwi/sub-1_run-01_dwi", {"PhaseEncodingDirection": "j"}),
-            ("dwi/sub-1_run-02_dwi", {"PhaseEncodingDirection": "j"}),
-        ],
-    )
-
-    write_test_bids(
-        "opposite",
-        [
-            ("dwi/sub-1_dir-AP_dwi", {"PhaseEncodingDirection": "j"}),
-            ("dwi/sub-1_dir-PA_dwi", {"PhaseEncodingDirection": "j-"}),
-        ],
-    )
-
-    write_test_bids(
-        "opposite_concat",
-        [
-            ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}),
-            ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}),
-            ("dwi/sub-1_dir-PA_run-1_dwi",
{"PhaseEncodingDirection": "j-"}), - ("dwi/sub-1_dir-PA_run-2_dwi", {"PhaseEncodingDirection": "j-"}), - ], - ) - - write_test_bids( - "phasediff", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ("fmap/sub-1_magnitude1", {"PhaseEncodingDirection": "j"}), - ("fmap/sub-1_magnitude2", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_phasediff", - { - "PhaseEncodingDirection": "j", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ], - ) - - write_test_bids( - "epi", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_dir-PA_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ], - ) - - write_test_bids( - "separate_fmaps", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_dir-PA_run-1_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": ["dwi/sub-1_dir-AP_run-1_dwi.nii.gz"], - }, - ), - ( - "fmap/sub-1_dir-PA_run-2_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": ["dwi/sub-1_dir-AP_run-2_dwi.nii.gz"], - }, - ), - ], - ) - - write_test_bids( - "mixed_fmaps", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-PA_run-2_dwi", {"PhaseEncodingDirection": "j-"}), - ( - "fmap/sub-1_dir-PA_run-1_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": ["dwi/sub-1_dir-AP_run-1_dwi.nii.gz"], - }, - ), - ( - "fmap/sub-1_dir-AP_run-2_epi", - { - "PhaseEncodingDirection": "j", - "IntendedFor": ["dwi/sub-1_dir-PA_run-2_dwi.nii.gz"], - }, - ), - ], - ) - - write_test_bids( - "missing_info", [("dwi/sub-1_dir-AP_run-1_dwi", {}), ("dwi/sub-1_dir-PA_run-2_dwi", {})] - ) - - write_test_bids( - "wtf", - [ - ("dwi/sub-1_run-1_dwi", {}), - ("dwi/sub-1_run-2_dwi", {}), - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-PA_run-1_dwi", {"PhaseEncodingDirection": "j-"}), - ("dwi/sub-1_dir-PA_run-2_dwi", {"PhaseEncodingDirection": "j-"}), - ("dwi/sub-1_dir-IS_dwi", {"PhaseEncodingDirection": "k-"}), - ], - ) - - write_test_bids( - "appa_fmaps", - [ - ("dwi/sub-1_dir-AP_run-1_dwi", {"PhaseEncodingDirection": "j"}), - ("dwi/sub-1_dir-AP_run-2_dwi", {"PhaseEncodingDirection": "j"}), - ( - "fmap/sub-1_dir-PA_run-1_epi", - { - "PhaseEncodingDirection": "j-", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ( - "fmap/sub-1_dir-AP_run-2_epi", - { - "PhaseEncodingDirection": "j", - "IntendedFor": [ - "dwi/sub-1_dir-AP_run-1_dwi.nii.gz", - "dwi/sub-1_dir-AP_run-2_dwi.nii.gz", - ], - }, - ), - ], - ) - - return empty_bids_dir diff --git a/qsirecon/viz/utils.py b/qsirecon/viz/utils.py index a312f2c2..7e5972e1 100644 --- a/qsirecon/viz/utils.py +++ b/qsirecon/viz/utils.py @@ -103,57 +103,6 @@ def plot_denoise( return out_files -def plot_acpc( - acpc_registered_img, - div_id, - plot_params=None, - order=("z", "x", "y"), - cuts=None, - estimate_brightness=False, - label=None, - compress="auto", -): - """ - Plot the results of an AC-PC transformation. 
- """ - plot_params = plot_params or {} - - # Do the low-b image first - out_files = [] - if estimate_brightness: - plot_params = robust_set_limits( - acpc_registered_img.get_fdata(dtype="float32").reshape(-1), plot_params - ) - - # Plot each cut axis for low-b - for i, mode in enumerate(list(order)): - plot_params["display_mode"] = mode - plot_params["cut_coords"] = [-20.0, 0.0, 20.0] - if i == 0: - plot_params["title"] = label - else: - plot_params["title"] = None - - # Generate nilearn figure - display = plot_anat(acpc_registered_img, **plot_params) - for coord, axis in display.axes.items(): - axis.ax.axvline(0, lw=1) - axis.ax.axhline(0, lw=1) - svg = extract_svg(display, compress=compress) - display.close() - - # Find and replace the figure_1 id. - xml_data = etree.fromstring(svg) - find_text = etree.ETXPath("//{%s}g[@id='figure_1']" % SVGNS) - find_text(xml_data)[0].set("id", "%s-%s-%s" % (div_id, mode, uuid4())) - - svg_fig = SVGFigure() - svg_fig.root = xml_data - out_files.append(svg_fig) - - return out_files - - def slices_from_bbox(mask_data, cuts=3, padding=0): """Finds equi-spaced cuts for presenting images""" B = np.argwhere(mask_data > 0) diff --git a/qsirecon/workflows/anatomical/__init__.py b/qsirecon/workflows/anatomical/__init__.py deleted file mode 100644 index 11622373..00000000 --- a/qsirecon/workflows/anatomical/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .volume import init_anat_preproc_wf, init_synthseg_wf, init_synthstrip_wf diff --git a/qsirecon/workflows/anatomical/surface.py b/qsirecon/workflows/anatomical/surface.py deleted file mode 100644 index 5a6a9e2a..00000000 --- a/qsirecon/workflows/anatomical/surface.py +++ /dev/null @@ -1,791 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -Anatomical reference preprocessing workflows -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. autofunction:: init_anat_preproc_wf -.. autofunction:: init_skullstrip_ants_wf - -""" -from nipype import logging -from nipype.interfaces import freesurfer as fs -from nipype.interfaces import io as nio -from nipype.interfaces import utility as niu -from nipype.pipeline import engine as pe -from niworkflows.engine.workflows import LiterateWorkflow as Workflow -from niworkflows.interfaces.freesurfer import RobustRegister -from niworkflows.interfaces.reportlets.segmentation import ReconAllRPT - -from ...interfaces import DerivativesDataSink as FDerivativesDataSink -from ...interfaces import ( - FSDetectInputs, - FSInjectBrainExtracted, - MakeMidthickness, - NormalizeSurf, - RefineBrainMask, -) -from ...interfaces.freesurfer import PatchedLTAConvert as LTAConvert -from ...utils.misc import fix_multi_T1w_source_name - -# from pkg_resources import resource_filename as pkgr - - -LOGGER = logging.getLogger("nipype.workflow") - - -class DerivativesDataSink(FDerivativesDataSink): - out_path_base = "qsirecon" - - -TEMPLATE_MAP = { - "MNI152NLin2009cAsym": "mni_icbm152_nlin_asym_09c", -} - - -def init_surface_recon_wf(omp_nthreads, hires, name="surface_recon_wf"): - r""" - This workflow reconstructs anatomical surfaces using FreeSurfer's ``recon-all``. - Reconstruction is performed in three phases. - The first phase initializes the subject with T1w and T2w (if available) - structural images and performs basic reconstruction (``autorecon1``) with the - exception of skull-stripping. 
-    For example, a subject with only one session with T1w and T2w images
-    would be processed by the following command::
-        $ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-            -i <bids-root>/sub-<subject_label>/anat/sub-<subject_label>_T1w.nii.gz \
-            -T2 <bids-root>/sub-<subject_label>/anat/sub-<subject_label>_T2w.nii.gz \
-            -autorecon1 \
-            -noskullstrip
-    The second phase imports an externally computed skull-stripping mask.
-    This workflow refines the external brainmask using the internal mask
-    implicit in FreeSurfer's ``aseg.mgz`` segmentation,
-    to reconcile ANTs' and FreeSurfer's brain masks.
-    First, the ``aseg.mgz`` mask from FreeSurfer is refined in two
-    steps, using binary morphological operations:
-      1. With a binary closing operation the sulci are included
-         into the mask. This results in a smoother brain mask
-         that does not exclude deep, wide sulci.
-      2. Fill any holes (typically, there could be a hole next to
-         the pineal gland and the corpora quadrigemina if the great
-         cerebral vein is segmented out).
-    Second, the brain mask is grown, including pixels that have a high likelihood
-    of belonging to the GM tissue distribution:
-      3. Dilate and subtract the brain mask, defining the region to search for candidate
-         pixels that likely belong to cortical GM.
-      4. Pixels found in the search region that are labeled as GM by ANTs
-         (during ``antsBrainExtraction.sh``) are directly added to the new mask.
-      5. Otherwise, estimate GM tissue parameters locally in patches of ``ww`` size,
-         and test the likelihood that the pixel belongs to the GM distribution.
-    This procedure is inspired by mindboggle's solution to the problem:
-    https://github.com/nipy/mindboggle/blob/7f91faaa7664d820fe12ccc52ebaf21d679795e2/mindboggle/guts/segment.py#L1660
-    The final phase resumes reconstruction, using the T2w image to assist
-    in finding the pial surface, if available.
-    See :py:func:`~qsirecon.workflows.anatomical.init_autorecon_resume_wf` for details.
-    Memory annotations for FreeSurfer are based on `their documentation
-    <https://surfer.nmr.mgh.harvard.edu/fswiki/SystemRequirements>`_.
-    They specify an allocation of 4GB per subject. Here we define 5GB
-    to have a certain margin.
-    .. workflow::
-        :graph2use: orig
-        :simple_form: yes
-        from qsirecon.workflows.anatomical import init_surface_recon_wf
-        wf = init_surface_recon_wf(omp_nthreads=1, hires=True)
-    **Parameters**
-        omp_nthreads : int
-            Maximum number of threads an individual process may use
-        hires : bool
-            Enable sub-millimeter preprocessing in FreeSurfer
-    **Inputs**
-        t1w
-            List of T1-weighted structural images
-        t2w
-            List of T2-weighted structural images (only first used)
-        flair
-            List of FLAIR images
-        skullstripped_t1
-            Skull-stripped T1-weighted image (or mask of image)
-        ants_segs
-            Brain tissue segmentation from ANTS ``antsBrainExtraction.sh``
-        corrected_t1
-            INU-corrected, merged T1-weighted image
-        subjects_dir
-            FreeSurfer SUBJECTS_DIR
-        subject_id
-            FreeSurfer subject ID
-    **Outputs**
-        subjects_dir
-            FreeSurfer SUBJECTS_DIR
-        subject_id
-            FreeSurfer subject ID
-        t1_2_fsnative_forward_transform
-            LTA-style affine matrix translating from T1w to FreeSurfer-conformed subject space
-        t1_2_fsnative_reverse_transform
-            LTA-style affine matrix translating from FreeSurfer-conformed subject space to T1w
-        surfaces
-            GIFTI surfaces for gray/white matter boundary, pial surface,
-            midthickness (or graymid) surface, and inflated surfaces
-        out_brainmask
-            Refined brainmask, derived from FreeSurfer's ``aseg`` volume
-        out_aseg
-            FreeSurfer's aseg segmentation, in native T1w space
-        out_aparc
-            FreeSurfer's aparc+aseg segmentation, in native T1w space
-        out_report
-            Reportlet visualizing quality of surface alignment
-    **Subworkflows**
-        * :py:func:`~qsirecon.workflows.anatomical.init_autorecon_resume_wf`
-        * :py:func:`~qsirecon.workflows.anatomical.init_gifti_surface_wf`
-    """
-
-    workflow = Workflow(name=name)
-    workflow.__desc__ = """\
-Brain surfaces were reconstructed using `recon-all` [FreeSurfer {fs_ver},
-RRID:SCR_001847, @fs_reconall], and the brain mask estimated
-previously was refined with a custom variation of the method to reconcile
-ANTs-derived and FreeSurfer-derived segmentations of the cortical
-gray-matter of Mindboggle [RRID:SCR_002438, @mindboggle].
-""".format( - fs_ver=fs.Info().looseversion() or "" - ) - - inputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "t1w", - "t2w", - "flair", - "skullstripped_t1", - "corrected_t1", - "ants_segs", - "subjects_dir", - "subject_id", - ] - ), - name="inputnode", - ) - outputnode = pe.Node( - niu.IdentityInterface( - fields=[ - "subjects_dir", - "subject_id", - "t1_2_fsnative_forward_transform", - "t1_2_fsnative_reverse_transform", - "surfaces", - "out_brainmask", - "out_aseg", - "out_aparc", - "out_report", - ] - ), - name="outputnode", - ) - - recon_config = pe.Node(FSDetectInputs(hires_enabled=hires), name="recon_config") - - autorecon1 = pe.Node( - fs.ReconAll(directive="autorecon1", flags="-noskullstrip", openmp=omp_nthreads), - name="autorecon1", - n_procs=omp_nthreads, - mem_gb=5, - ) - autorecon1.interface._can_resume = False - autorecon1.interface._always_run = True - - skull_strip_extern = pe.Node(FSInjectBrainExtracted(), name="skull_strip_extern") - - fsnative_2_t1_xfm = pe.Node( - RobustRegister(auto_sens=True, est_int_scale=True), name="fsnative_2_t1_xfm" - ) - t1_2_fsnative_xfm = pe.Node(LTAConvert(out_lta=True, invert=True), name="t1_2_fsnative_xfm") - - autorecon_resume_wf = init_autorecon_resume_wf(omp_nthreads=omp_nthreads) - gifti_surface_wf = init_gifti_surface_wf() - - aseg_to_native_wf = init_segs_to_native_wf() - aparc_to_native_wf = init_segs_to_native_wf(segmentation="aparc_aseg") - refine = pe.Node(RefineBrainMask(), name="refine") - - workflow.connect([ - # Configuration - (inputnode, recon_config, [('t1w', 't1w_list'), - ('t2w', 't2w_list'), - ('flair', 'flair_list')]), - # Passing subjects_dir / subject_id enforces serial order - (inputnode, autorecon1, [('subjects_dir', 'subjects_dir'), - ('subject_id', 'subject_id')]), - (autorecon1, skull_strip_extern, [('subjects_dir', 'subjects_dir'), - ('subject_id', 'subject_id')]), - (skull_strip_extern, autorecon_resume_wf, [('subjects_dir', 'inputnode.subjects_dir'), - ('subject_id', 'inputnode.subject_id')]), - (autorecon_resume_wf, gifti_surface_wf, [ - ('outputnode.subjects_dir', 'inputnode.subjects_dir'), - ('outputnode.subject_id', 'inputnode.subject_id')]), - # Reconstruction phases - (inputnode, autorecon1, [('t1w', 'T1_files')]), - (recon_config, autorecon1, [('t2w', 'T2_file'), - ('flair', 'FLAIR_file'), - ('hires', 'hires'), - # First run only (recon-all saves expert options) - ('mris_inflate', 'mris_inflate')]), - (inputnode, skull_strip_extern, [('skullstripped_t1', 'in_brain')]), - (recon_config, autorecon_resume_wf, [('use_t2w', 'inputnode.use_T2'), - ('use_flair', 'inputnode.use_FLAIR')]), - # Construct transform from FreeSurfer conformed image to FMRIPREP - # reoriented image - (inputnode, fsnative_2_t1_xfm, [('t1w', 'target_file')]), - (autorecon1, fsnative_2_t1_xfm, [('T1', 'source_file')]), - (fsnative_2_t1_xfm, gifti_surface_wf, [ - ('out_reg_file', 'inputnode.t1_2_fsnative_reverse_transform')]), - (fsnative_2_t1_xfm, t1_2_fsnative_xfm, [('out_reg_file', 'in_lta')]), - # Refine ANTs mask, deriving new mask from FS' aseg - (inputnode, refine, [('corrected_t1', 'in_anat'), - ('ants_segs', 'in_ants')]), - (inputnode, aseg_to_native_wf, [('corrected_t1', 'inputnode.in_file')]), - (autorecon_resume_wf, aseg_to_native_wf, [ - ('outputnode.subjects_dir', 'inputnode.subjects_dir'), - ('outputnode.subject_id', 'inputnode.subject_id')]), - (inputnode, aparc_to_native_wf, [('corrected_t1', 'inputnode.in_file')]), - (autorecon_resume_wf, aparc_to_native_wf, [ - ('outputnode.subjects_dir', 
'inputnode.subjects_dir'),
-            ('outputnode.subject_id', 'inputnode.subject_id')]),
-        (aseg_to_native_wf, refine, [('outputnode.out_file', 'in_aseg')]),
-
-        # Output
-        (autorecon_resume_wf, outputnode, [('outputnode.subjects_dir', 'subjects_dir'),
-                                           ('outputnode.subject_id', 'subject_id'),
-                                           ('outputnode.out_report', 'out_report')]),
-        (gifti_surface_wf, outputnode, [('outputnode.surfaces', 'surfaces')]),
-        (t1_2_fsnative_xfm, outputnode, [('out_lta', 't1_2_fsnative_forward_transform')]),
-        (fsnative_2_t1_xfm, outputnode, [('out_reg_file', 't1_2_fsnative_reverse_transform')]),
-        (refine, outputnode, [('out_file', 'out_brainmask')]),
-        (aseg_to_native_wf, outputnode, [('outputnode.out_file', 'out_aseg')]),
-        (aparc_to_native_wf, outputnode, [('outputnode.out_file', 'out_aparc')]),
-    ])  # fmt:skip
-
-    return workflow
-
-
-def init_autorecon_resume_wf(omp_nthreads, name="autorecon_resume_wf"):
-    r"""
-    This workflow resumes recon-all execution, assuming the `-autorecon1` stage
-    has been completed.
-    In order to utilize resources efficiently, this is broken down into five
-    sub-stages; after the first stage, the second and third stages may be run
-    simultaneously, and the fourth and fifth stages may be run simultaneously,
-    if resources permit::
-        $ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-            -autorecon2-volonly
-        $ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-            -autorecon-hemi lh \
-            -noparcstats -nocortparc2 -noparcstats2 -nocortparc3 \
-            -noparcstats3 -nopctsurfcon -nohyporelabel -noaparc2aseg \
-            -noapas2aseg -nosegstats -nowmparc -nobalabels
-        $ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-            -autorecon-hemi rh \
-            -noparcstats -nocortparc2 -noparcstats2 -nocortparc3 \
-            -noparcstats3 -nopctsurfcon -nohyporelabel -noaparc2aseg \
-            -noapas2aseg -nosegstats -nowmparc -nobalabels
-        $ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-            -autorecon3 -hemi lh -T2pial
-        $ recon-all -sd <output dir>/freesurfer -subjid sub-<subject_label> \
-            -autorecon3 -hemi rh -T2pial
-    The excluded steps in the second and third stages (``-no