diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d3e1591..4e1fa1a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        nxf_ver: ['19.04.0', '20.04.1']
+        nxf_ver: ['19.04.0', '20.04.1', '21.10.6']
     steps:
     - uses: actions/checkout@v2
     - name: Install Nextflow
@@ -18,7 +18,7 @@ jobs:
         sudo mv nextflow /usr/local/bin/
     - name: Pull docker image
       run: |
-        docker pull scilus/docker-tractoflow:latest
+        docker pull scilus/scilus:latest
     - name: Run pipeline
       run: |
-        nextflow run ${GITHUB_WORKSPACE} --help -with-docker scilus/docker-tractoflow:latest
+        nextflow run ${GITHUB_WORKSPACE} --help -with-docker scilus/scilus:latest
diff --git a/README.md b/README.md
index e76ec20..049fc3a 100755
--- a/README.md
+++ b/README.md
@@ -3,10 +3,12 @@ TractoFlow pipeline
 [![DOI](https://img.shields.io/badge/DOI-10.1016%2Fj.neuroimage.2020.116889-blue)](https://doi.org/10.1016/j.neuroimage.2020.116889)
 [![GitHub release (latest by date)](https://img.shields.io/github/v/release/scilus/tractoflow)](https://github.com/scilus/tractoflow/releases)
-[![Nextflow](https://img.shields.io/badge/nextflow-19.04.0-brightgreen.svg)](https://www.nextflow.io/)
+[![Nextflow](https://img.shields.io/badge/nextflow-21.10.6-brightgreen.svg)](https://www.nextflow.io/)
 ![TractoFlow CI](https://github.com/scilus/tractoflow/workflows/TractoFlow%20CI/badge.svg)
 [![Documentation Status](https://readthedocs.org/projects/tractoflow-documentation/badge/?version=latest)](https://tractoflow-documentation.readthedocs.io/en/latest/?badge=latest)
+[![Docker container badge](https://img.shields.io/docker/v/scilus/scilus?label=docker&logo=docker&logoColor=white)](https://hub.docker.com/r/scilus/scilus)
+
 The TractoFlow pipeline is a fully automated and reproducible dMRI processing pipeline.
diff --git a/USAGE b/USAGE
index 88ffa1c..5f67daa 100755
--- a/USAGE
+++ b/USAGE
@@ -175,6 +175,7 @@ OPTIONAL ARGUMENTS (current value)
 --local_compress_value              [LOCAL] Compression error threshold ($local_compress_value).
 --local_random_seed                 [LOCAL] List of random seed numbers for the random number generator ($local_random_seed).
                                     Please write them as a list separated using commas WITHOUT SPACE e.g. (--local_random_seed 0,1,2)
+--local_batch_size_gpu              [LOCAL-GPU] Approximate size of GPU batches (number of streamlines to track in parallel) ($local_batch_size_gpu).
 --template_t1                       Path to the template T1 directory for antsBrainExtraction.
                                     The folder must contain t1_template.nii.gz and t1_brain_probability_map.nii.gz.
@@ -193,11 +194,12 @@ OPTIONAL ARGUMENTS (current value)
 --processes                         The number of parallel processes to launch ($cpu_count).
                                     Only affects the local scheduler.

-AVAILABLE PROFILES (using -profile option (e.g. -profile use_cuda,fully_reproducible))
+AVAILABLE PROFILES (using -profile option (e.g. -profile use_gpu,fully_reproducible))

 macos                               When this profile is used, TractoFlow will modify a parameter (scratch) for MacOS users.

-use_cuda                            When this profile is used, TractoFlow will use eddy_cuda for Eddy process.
+use_gpu                             When this profile is used, TractoFlow will use eddy_cuda for Eddy process.
+                                    If local tracking is also enabled, TractoFlow will use the GPU implementation of scil_compute_local_tracking.py. This feature is available with NVIDIA GPUs only.
                                     Without this profile, TractoFlow will run eddy_openmp.

 fully_reproducible                  When this profile is used, all the parameters will be set to have 100% reproducible results.
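
Usage note for the README/USAGE changes above: a minimal sketch of a command line combining the renamed use_gpu profile with the new --local_batch_size_gpu option. <DATASET_ROOT> and the batch size value are placeholders, and the --input flag is assumed from TractoFlow's general usage rather than shown in this diff; the -profile and -with-docker values come from the USAGE and CI changes above.

    # Sketch only: <DATASET_ROOT> and the batch size (20000) are illustrative placeholders.
    nextflow run tractoflow/main.nf \
        --input <DATASET_ROOT> \
        -profile use_gpu,fully_reproducible \
        --local_batch_size_gpu 20000 \
        -with-docker scilus/scilus:latest
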
diff --git a/main.nf b/main.nf
index 763cabb..ba3f383 100644
--- a/main.nf
+++ b/main.nf
@@ -88,6 +88,8 @@ if(params.help) {
                 "local_max_len":"$params.local_max_len",
                 "local_compress_value":"$params.local_compress_value",
                 "local_random_seed":"$params.local_random_seed",
+                "local_batch_size_gpu":"$params.local_batch_size_gpu",
+                "local_tracking_gpu":"$params.local_tracking_gpu",
                 "cpu_count":"$cpu_count",
                 "template_t1":"$params.template_t1",
                 "processes_brain_extraction_t1":"$params.processes_brain_extraction_t1",
@@ -373,9 +375,9 @@ if (params.eddy_cmd == "eddy_cpu" && params.processes_eddy == 1 && params.run_ed
     number_rev_dwi
     .subscribe{a -> if (a>0) error "Error ~ You have some subjects with a reverse encoding DWI.\n" +
-        "Eddy will take forever to run with this configuration. \nPlease add " +
-        "-profile use_cuda with a GPU environnement OR increase the number of processes " +
-        "for this task (--processes_eddy) to be able to analyse this data."}
+        "Eddy will take forever to run with this configuration. \nPlease add " +
+        "-profile use_gpu with a GPU environment (NVIDIA GPU with CUDA) OR increase the number " +
+        "of processes for this task (--processes_eddy) to be able to analyse this data."}
 }

 if (!params.run_topup || !params.run_eddy){
@@ -1893,6 +1895,10 @@ process Local_Tracking {
     script:
     compress =\
         params.local_compress_streamlines ? '--compress ' + params.local_compress_value : ''
+    use_gpu =\
+        params.local_tracking_gpu ? '--use_gpu' : ''
+    batch_size_gpu =\
+        params.local_batch_size_gpu ? '--batch_size ' + params.local_batch_size_gpu : ''
     """
     export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1
     export OMP_NUM_THREADS=1
@@ -1901,8 +1907,10 @@ process Local_Tracking {
         tmp.trk\
         --algo $params.local_algo --$params.local_seeding $params.local_nbr_seeds\
         --seed $curr_seed --step $params.local_step --theta $params.local_theta\
-        --sfthres $params.local_sfthres --min_length $params.local_min_len\
-        --max_length $params.local_max_len $compress --sh_basis $params.basis
+        --sfthres $params.local_sfthres --min_length $params.local_min_len\
+        --max_length $params.local_max_len $compress --sh_basis $params.basis\
+        $use_gpu $batch_size_gpu
+
     scil_remove_invalid_streamlines.py tmp.trk\
         ${sid}__local_tracking_${params.local_algo}_${params.local_seeding_mask_type}_seeding_${params.local_tracking_mask_type}_mask_seed_${curr_seed}.trk\
         --remove_single_point
diff --git a/nextflow.config b/nextflow.config
index 6cc6e77..78dd4c7 100644
--- a/nextflow.config
+++ b/nextflow.config
@@ -139,6 +139,8 @@ params {
     local_max_len=200
     local_compress_value=0.2
     local_random_seed=0
+    local_tracking_gpu=false
+    local_batch_size_gpu=10000

     //**Number of processes per tasks**//
     processes_brain_extraction_t1=4
@@ -190,10 +192,12 @@ singularity.autoMounts = true

 profiles {

-    use_cuda {
+    use_gpu {
         singularity.runOptions='--nv'
         docker.runOptions='--gpus all'
-        params.eddy_cmd="eddy_cuda"
+        params.eddy_cmd="eddy_cuda10.2"
+        params.local_algo="prob"
+        params.local_tracking_gpu=true
     }

     fully_reproducible {
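
For reference, when params.local_tracking_gpu is enabled (for example through the use_gpu profile above), the Local_Tracking process appends the two extra flags to its scil_compute_local_tracking.py call. A rough sketch of the expanded commands is shown below; only --use_gpu and --batch_size come directly from this diff, while the file names, the --npv seeding option, and the numeric values are illustrative placeholders, not the pipeline's actual defaults.

    # Rough sketch of the expanded Local_Tracking script when GPU tracking is on.
    # Inputs, --npv seeding and numeric values are placeholders for illustration.
    scil_compute_local_tracking.py fodf.nii.gz seeding_mask.nii.gz tracking_mask.nii.gz \
        tmp.trk \
        --algo prob --npv 10 \
        --seed 0 --step 0.5 --theta 20 \
        --sfthres 0.1 --min_length 20 --max_length 200 \
        --compress 0.2 --sh_basis descoteaux07 \
        --use_gpu --batch_size 10000
    # Invalid streamlines are then removed, as in the process above (output name is a placeholder).
    scil_remove_invalid_streamlines.py tmp.trk local_tracking_prob.trk --remove_single_point
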