diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml index 75e282d9b..2aecd2e65 100644 --- a/.github/workflows/branch.yml +++ b/.github/workflows/branch.yml @@ -12,6 +12,7 @@ jobs: # PRs are only ok if coming from an nf-core dev branch - name: Check PRs run: | + { [[ $(git remote get-url origin) == *nf-core/eager ]] && [[ ${GITHUB_HEAD_REF} = "dev" ]]; } || [[ ${GITHUB_HEAD_REF} == "patch" ]] echo "HOME: ${HOME}" echo "GITHUB_WORKFLOW: ${GITHUB_WORKFLOW}" echo "GITHUB_ACTION: ${GITHUB_ACTION}" diff --git a/.github/workflows/nf-core_eager.yml b/.github/workflows/ci.yml similarity index 65% rename from .github/workflows/nf-core_eager.yml rename to .github/workflows/ci.yml index 7213d06af..9d02288b4 100644 --- a/.github/workflows/nf-core_eager.yml +++ b/.github/workflows/ci.yml @@ -10,20 +10,26 @@ jobs: - name: Try Creating Conda env run: | conda env create --prefix nf-core-eager-2.1.0dev-d95a13feb408cc17e95d38f16a81010d --file environment.yml - github_actions_ci: - runs-on: ubuntu-latest + test: + runs-on: ubuntu-18.04 env: TOWER_ACCESS_TOKEN: ${{ secrets.TOWER_ACCESS_TOKEN }} NXF_ANSI_LOG: 0 strategy: matrix: - endedness: ['--singleEnd', '--pairedEnd'] + # Nextflow versions: check pipeline minimum and current latest + nxf_ver: ['19.10.0', ''] + endedness: ['--single_end', '--paired_end'] steps: - uses: actions/checkout@v1 - name: Install Nextflow run: | + export NXF_VER=${{ matrix.nxf_ver }} wget -qO- get.nextflow.io | bash sudo mv nextflow /usr/local/bin/ + - name: Download image + run: | + docker pull nfcore/eager:dev && docker tag nfcore/eager:dev nfcore/eager:dev - name: Extract branch name shell: bash run: echo "::set-env name=RUN_NAME::`echo ${GITHUB_REPOSITORY//\//_}`-`echo ${GITHUB_HEAD_REF//\//@} | rev | cut -f1 -d@ | rev`-${{ github.event_name }}-`echo ${GITHUB_SHA} | cut -c1-6`" @@ -40,61 +46,61 @@ jobs: nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-preindex_ref" -profile test,docker ${{ matrix.endedness }} --bwa_index 
'results/reference_genome/bwa_index/BWAIndex/Mammoth_MT_Krause.fasta' --fasta_index 'https://github.com/nf-core/test-datasets/blob/eager/reference/Mammoth/Mammoth_MT_Krause.fasta.fai' - name: REFERENCE Run the basic pipeline with FastA reference with `fna` extension run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-fna_ref" -profile test_fna,docker --pairedEnd + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-fna_ref" -profile test_fna,docker --paired_end - name: REFERENCE Test with zipped reference input run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-gz_ref" -profile test,docker --pairedEnd --fasta 'https://github.com/nf-core/test-datasets/raw/eager/reference/Mammoth/Mammoth_MT_Krause.fasta.gz' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-gz_ref" -profile test,docker --paired_end --fasta 'https://github.com/nf-core/test-datasets/raw/eager/reference/Mammoth/Mammoth_MT_Krause.fasta.gz' - name: FASTP Test fastp complexity filtering run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-fastp" -profile test,docker --pairedEnd --complexity_filter - - name: ADAPTERREMOVAL Test skip pairedEnd collapsing + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-fastp" -profile test,docker --paired_end --complexity_filter + - name: ADAPTERREMOVAL Test skip paired end collapsing run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-skip_collapse" -profile test,docker --pairedEnd --skip_collapse - - name: ADAPTERREMOVAL Test pairedEnd collapsing but no trimming + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-skip_collapse" -profile test,docker --paired_end --skip_collapse + - name: ADAPTERREMOVAL Test paired end collapsing but no trimming run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-pretrim" -profile test_pretrim,docker --pairedEnd --skip_trim + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-pretrim" -profile test_pretrim,docker 
--paired_end --skip_trim - name: ADAPTERREMOVAL Run the basic pipeline with paired end data without adapterRemoval run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-skip_adapterremoval" -profile test,docker --pairedEnd --skip_adapterremoval + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-skip_adapterremoval" -profile test,docker --paired_end --skip_adapterremoval - name: ADAPTERREMOVAL Run the basic pipeline with preserve5p end option run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-preserve5p" -profile test,docker --pairedEnd --preserve5p + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-preserve5p" -profile test,docker --paired_end --preserve5p - name: ADAPTERREMOVAL Run the basic pipeline with merged only option run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-mergedonly" -profile test,docker --pairedEnd --mergedonly + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-mergedonly" -profile test,docker --paired_end --mergedonly - name: ADAPTERREMOVAL Run the basic pipeline with preserve5p end and merged reads only options run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-preserve5p_mergedonly" -profile test,docker --pairedEnd --preserve5p --mergedonly + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-preserve5p_mergedonly" -profile test,docker --paired_end --preserve5p --mergedonly - name: MAPPER_CIRCULARMAPPER Test running with CircularMapper run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-circularmapper" -profile test,docker --pairedEnd --mapper 'circularmapper' --circulartarget 'NC_007596.2' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-circularmapper" -profile test,docker --paired_end --mapper 'circularmapper' --circulartarget 'NC_007596.2' - name: MAPPER_BWAMEM Test running with BWA Mem run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-bwa_mem" -profile test,docker --pairedEnd --mapper 
'bwamem' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-bwa_mem" -profile test,docker --paired_end --mapper 'bwamem' - name: STRIP_FASTQ Run the basic pipeline with output unmapped reads as fastq run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-stripfastq" -profile test,docker --pairedEnd --strip_input_fastq + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-stripfastq" -profile test,docker --paired_end --strip_input_fastq - name: BAM_FILTERING Run basic mapping pipeline with mapping quality filtering, and unmapped export run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-unmapped_export" -profile test,docker --pairedEnd --run_bam_filtering --bam_mapping_quality_threshold 37 --bam_discard_umapped --bam_unmapped_type 'fastq' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-unmapped_export" -profile test,docker --paired_end --run_bam_filtering --bam_mapping_quality_threshold 37 --bam_discard_umapped --bam_unmapped_type 'fastq' - name: GENOTYPING_HC Test running GATK HaplotypeCaller run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-haplotypercaller" -profile test_fna,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'hc' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_hc_emitrefconf 'BP_RESOLUTION' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-haplotypercaller" -profile test_fna,docker --paired_end --dedupper 'dedup' --run_genotyping --genotyping_tool 'hc' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_hc_emitrefconf 'BP_RESOLUTION' - name: GENOTYPING_FB Test running FreeBayes run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-freebayes" -profile test,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'freebayes' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-freebayes" -profile test,docker --paired_end --dedupper 'dedup' --run_genotyping --genotyping_tool 'freebayes' - name: SKIPPING Test checking all 
skip steps work i.e. input bam, skipping straight to genotyping run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-skipping_logic" -profile test_bam,docker --bam --singleEnd --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --skip_preseq --skip_damage_calculation --run_genotyping --genotyping_tool 'freebayes' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-skipping_logic" -profile test_bam,docker --bam --single_end --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --skip_preseq --skip_damage_calculation --run_genotyping --genotyping_tool 'freebayes' #- name: TRIM_BAM/PMD/GENOTYPING_UG/MULTIVCFANALYZER Test running PMDTools, TrimBam, GATK UnifiedGenotyper and MultiVCFAnalyzer # run: | - # nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-pmd_trimbam_gatkUG_MVA" -profile test,docker --pairedEnd --dedupper 'dedup' --run_trim_bam --run_pmdtools --run_genotyping --genotyping_source 'trimmed' --genotyping_tool 'ug' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_multivcfanalyzer + # nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-pmd_trimbam_gatkUG_MVA" -profile test,docker --paired_end --dedupper 'dedup' --run_trim_bam --run_pmdtools --run_genotyping --genotyping_source 'trimmed' --genotyping_tool 'ug' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_multivcfanalyzer #- name: GENOTYPING_UG/PMD/MULTIVCFANALYZER Test running GATK UnifiedGenotyper and MultiVCFAnalyzer, additional VCFS # run: | - # nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-MVA_additionalvcfs" -profile test,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'ug' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_multivcfanalyzer --additional_vcf_files 
'https://raw.githubusercontent.com/nf-core/test-datasets/eager/testdata/Mammoth/vcf/JK2772_CATCAGTGAGTAGA_L008_R1_001.fastq.gz.tengrand.fq.combined.fq.mapped_rmdup.bam.unifiedgenotyper.vcf.gz' --write_allele_frequencies + # nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-MVA_additionalvcfs" -profile test,docker --paired_end --dedupper 'dedup' --run_genotyping --genotyping_tool 'ug' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_multivcfanalyzer --additional_vcf_files 'https://raw.githubusercontent.com/nf-core/test-datasets/eager/testdata/Mammoth/vcf/JK2772_CATCAGTGAGTAGA_L008_R1_001.fastq.gz.tengrand.fq.combined.fq.mapped_rmdup.bam.unifiedgenotyper.vcf.gz' --write_allele_frequencies #- name: VCF2Genome Run basic pipeline with GATK unifiedgenotyper and run VCF2Genome # run: | - # nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-vcf2genome" -profile test,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'ug' --genotyping_source 'raw' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_vcf2genome + # nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-vcf2genome" -profile test,docker --paired_end --dedupper 'dedup' --run_genotyping --genotyping_tool 'ug' --genotyping_source 'raw' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_vcf2genome - name: BAM_INPUT Run the basic pipeline with the bam input profile, skip AdapterRemoval as no convertBam run: | nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-baminput_noConvertBam" -profile test_bam,docker --bam --skip_adapterremoval --run_convertbam @@ -108,20 +114,20 @@ jobs: for i in index0.idx ref.db ref.idx ref.inf table0.db table0.idx taxonomy.idx taxonomy.map taxonomy.tre; do wget https://github.com/nf-core/test-datasets/raw/eager/databases/malt/"$i" -P databases/malt/; done - name: METAGENOMIC Run the basic pipeline but with unmapped reads going into MALT run: | - nextflow run ${GITHUB_WORKSPACE} 
"$TOWER" -name "$RUN_NAME-malt" -profile test,docker --pairedEnd --run_bam_filtering --bam_discard_unmapped --bam_unmapped_type 'fastq' --run_metagenomic_screening --database "/home/runner/work/eager/eager/databases/malt/" + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-malt" -profile test,docker --paired_end --run_bam_filtering --bam_discard_unmapped --bam_unmapped_type 'fastq' --run_metagenomic_screening --database "/home/runner/work/eager/eager/databases/malt/" - name: MALTEXTRACT Download resource files run: | mkdir -p databases/maltextract for i in ncbi.tre ncbi.map; do wget https://github.com/rhuebler/HOPS/raw/0.33/Resources/"$i" -P databases/maltextract/; done - name: MALTEXTRACT Basic with MALT plus MaltExtract run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-maltextract" -profile test,docker --pairedEnd --run_bam_filtering --bam_discard_unmapped --bam_unmapped_type 'fastq' --run_metagenomic_screening --metagenomic_tool 'malt' --database "/home/runner/work/eager/eager/databases/malt" --run_maltextract --maltextract_ncbifiles "/home/runner/work/eager/eager/databases/maltextract/" --maltextract_taxon_list 'https://raw.githubusercontent.com/nf-core/test-datasets/eager/testdata/Mammoth/maltextract/MaltExtract_list.txt' + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-maltextract" -profile test,docker --paired_end --run_bam_filtering --bam_discard_unmapped --bam_unmapped_type 'fastq' --run_metagenomic_screening --metagenomic_tool 'malt' --database "/home/runner/work/eager/eager/databases/malt" --run_maltextract --maltextract_ncbifiles "/home/runner/work/eager/eager/databases/maltextract/" --maltextract_taxon_list 'https://raw.githubusercontent.com/nf-core/test-datasets/eager/testdata/Mammoth/maltextract/MaltExtract_list.txt' - name: SEXDETERMINATION Run the basic pipeline with the bam input profile, but don't convert BAM, skip everything but sex determination run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name 
"$RUN_NAME-sexdeterrmine" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --singleEnd --run_sexdeterrmine + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-sexdeterrmine" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --single_end --run_sexdeterrmine - name: NUCLEAR CONTAMINATION Run basic pipeline with bam input profile, but don't convert BAM, skip everything but nuclear contamination estimation run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-nuclear_contamination" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --singleEnd --run_nuclear_contamination + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-nuclear_contamination" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --single_end --run_nuclear_contamination - name: MTNUCRATIO Run basic pipeline with bam input profile, but don't convert BAM, skip everything but nmtnucratio run: | - nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-mtnucratio" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --singleEnd --skip_preseq --skip_damage_calculation --run_mtnucratio + nextflow run ${GITHUB_WORKSPACE} "$TOWER" -name "$RUN_NAME-mtnucratio" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --single_end --skip_preseq --skip_damage_calculation --run_mtnucratio diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index eef48337b..000000000 --- a/.travis.yml +++ /dev/null @@ -1,103 +0,0 @@ -sudo: required -language: python -jdk: openjdk8 -services: docker -python: '3.6' -cache: pip -matrix: - fast_finish: true - 
-before_install: - # PRs to master are only ok if coming from dev branch - - '[ $TRAVIS_PULL_REQUEST = "false" ] || [ $TRAVIS_BRANCH != "master" ] || ([ $TRAVIS_PULL_REQUEST_SLUG = $TRAVIS_REPO_SLUG ] && ([ $TRAVIS_PULL_REQUEST_BRANCH = "dev" ] || [ $TRAVIS_PULL_REQUEST_BRANCH = "patch" ]))' - # Pull the docker image first so the test doesn't wait for this - - docker pull nfcore/eager:dev - # Fake the tag locally so that the pipeline runs properly - # Looks weird when this is :dev to :dev, but makes sense when testing code for a release (:dev to :1.0.1) - - docker tag nfcore/eager:dev nfcore/eager:dev - -install: - # Install Nextflow - - mkdir /tmp/nextflow && cd /tmp/nextflow - - wget -qO- get.nextflow.io | bash - - sudo ln -s /tmp/nextflow/nextflow /usr/local/bin/nextflow - # Install nf-core/tools - - pip install --upgrade pip - - pip install nf-core - # Install Conda - - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh - - bash Miniconda3-latest-Linux-x86_64.sh -b -f -p $HOME/miniconda - - export PATH="$HOME/miniconda/bin:$PATH" - # Reset - - mkdir ${TRAVIS_BUILD_DIR}/tests && cd ${TRAVIS_BUILD_DIR}/tests - # Install markdownlint-cli - - sudo apt-get install npm && npm install -g markdownlint-cli - -env: - - NXF_VER='19.10.0' NXF_ANSI_LOG=0 RUN_NAME="eager-${TRAVIS_PULL_REQUEST_BRANCH}-${TRAVIS_JOB_NUMBER}" # Specify a minimum NF version that should be tested and work - -script: - # Lint the pipeline code - - nf-core lint ${TRAVIS_BUILD_DIR} - # REFERENCE: Run the basic pipeline with the test profile - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-basic" -profile test,docker --pairedEnd --saveReference - # REFERENCE: Run the basic pipeline with single end data (pretending its single end actually) and all prepared index files - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-singleEnd" -profile test,docker --singleEnd - # REFERENCE: Run the basic pipeline with FastA reference with `fna` extension - - nextflow run 
${TRAVIS_BUILD_DIR} -name "$RUN_NAME-fna_ref" -profile test_fna,docker --pairedEnd --saveReference - # REFERENCE: Test using pre-computed indices from a separate run beforehand - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-preindex_ref" -profile test_fna,docker --pairedEnd --bwa_index 'results/reference_genome/bwa_index/BWAIndex/Mammoth_MT_Krause.fna' --fasta_index 'results/reference_genome/fasta_index/Mammoth_MT_Krause.fna.fai' --seq_dict 'results/reference_genome/seq_dict/Mammoth_MT_Krause.dict' - # REFERENCE: Test with zipped reference input - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-gz_ref" -profile test,docker --pairedEnd --fasta 'https://github.com/jfy133/test-datasets/raw/eager/reference/Mammoth/Mammoth_MT_Krause.fasta.gz' - # FASTP: Run the same pipeline testing optional steps of fastp, complexity - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-fastp" -profile test,docker --pairedEnd --complexity_filter - # ADAPTERREMOVAL: Run the basic pipeline with paired end data without collapsing - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-skip_collapse" -profile test,docker --pairedEnd --skip_collapse - # ADAPTERREMOVAL: Run the basic pipeline with paired end data without trimming, but still merge - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-pretrim" -profile test_pretrim,docker --pairedEnd --skip_trim - # ADAPTERREMOVAL: Run the basic pipeline with paired end data without adapterRemoval - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-skip_adapterremoval" -profile test,docker --pairedEnd --skip_adapterremoval - # ADAPTERREMOVAL: Run the basic pipeline with preserve5p end option - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-preserve5p" -profile test,docker --pairedEnd --preserve5p - # ADAPTERREMOVAL: Run the basic pipeline with merged only option - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-mergedonly" -profile test,docker --pairedEnd --mergedonly - # ADAPTERREMOVAL: Run the basic pipeline with 
preserve5p end and merged reads only options - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-preserve5p_mergedonly" -profile test,docker --pairedEnd --preserve5p --mergedonly - # MAPPER_CIRCULARMAPPER: Test running with CircularMapper - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-circularmapper" -profile test,docker --pairedEnd --mapper 'circularmapper' --circulartarget 'NC_007596.2' - # MAPPER_BWAMEM: Test running with BWA Mem - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-bwa_mem" -profile test,docker --pairedEnd --mapper 'bwamem' - # STRIP_FASTQ: Run the basic pipeline with output unmapped reads as fastq - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-stripfastq" -profile test,docker --pairedEnd --strip_input_fastq - # BAM_FILTERING: Run basic mapping pipeline with mapping quality filtering, and unmapped export - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-unmapped_export" -profile test,docker --pairedEnd --run_bam_filtering --bam_mapping_quality_threshold 37 --bam_discard_umapped --bam_unmapped_type 'fastq' - # GENOTYPING_HC: Test running GATK HaplotypeCaller - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-haplotypercaller" -profile test_fna,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'hc' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_hc_emitrefconf 'BP_RESOLUTION' - # GENOTYPING_FB: Test running FreeBayes - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-freebayes" -profile test,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'freebayes' - # SKIPPING: Test checking all skip steps work i.e. 
input bam, skipping straight to genotyping - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-skipping_logic" -profile test_bam,docker --bam --singleEnd --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --skip_preseq --skip_damage_calculation --run_genotyping --genotyping_tool 'freebayes' - # TRIM_BAM/PMD/GENOTYPING_UG/MULTIVCFANALYZER: Test running PMDTools, TrimBam, GATK UnifiedGenotyper and MultiVCFAnalyzer - #- nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-pmd_trimbam_unifiedgenotyper_multivcfanalyzer" -profile test,docker --pairedEnd --dedupper 'dedup' --run_trim_bam --run_pmdtools --run_genotyping --genotyping_source 'trimmed' --genotyping_tool 'ug' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_multivcfanalyzer - # GENOTYPING_UG/PMD/MULTIVCFANALYZER: Test running GATK UnifiedGenotyper and MultiVCFAnalyzer, additional VCFS - #- nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-multivcfanalyzer_additionalvcfs" -profile test,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'ug' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_multivcfanalyzer --additional_vcf_files 'https://raw.githubusercontent.com/jfy133/test-datasets/eager/testdata/Mammoth/vcf/JK2772_CATCAGTGAGTAGA_L008_R1_001.fastq.gz.tengrand.fq.combined.fq.mapped_rmdup.bam.unifiedgenotyper.vcf.gz' --write_allele_frequencies - # VCF2GENOME: Test running GATK UnifiedGenotyper and run VCF2GENOME - #- nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-vcf2genome" -profile test,docker --pairedEnd --dedupper 'dedup' --run_genotyping --genotyping_tool 'ug' --genotyping_source 'raw' --gatk_out_mode 'EMIT_ALL_SITES' --gatk_ug_genotype_model 'SNP' --run_vcf2genome - # BAM_INPUT: Run the basic pipeline with the bam input profile, skip AdapterRemoval as no convertBam - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-baminput_noConvertBam" -profile test_bam,docker --bam --skip_adapterremoval 
--run_convertbam - # BAM_INPUT: Run the basic pipeline with the bam input profile, convert to FASTQ for adapterremoval test and downstream - - nextflow run ${TRAVIS_BUILD_DIR} -name "$RUN_NAME-baminput_convertbam_basic" -profile test_bam,docker --bam --run_convertbam - # METAGENOMIC Download database and Run the basic pipeline but with unmapped reads going into MALT - - mkdir -p ${TRAVIS_BUILD_DIR}/databases/malt && for i in index0.idx ref.db ref.idx ref.inf table0.db table0.idx taxonomy.idx taxonomy.map taxonomy.tre; do wget https://github.com/nf-core/test-datasets/raw/eager/databases/malt/"$i" -P "${TRAVIS_BUILD_DIR}/databases/malt/"; done - - nextflow run ${TRAVIS_BUILD_DIR} "$TOWER" -name "$RUN_NAME-malt" -profile test,docker --pairedEnd --run_bam_filtering --bam_discard_unmapped --bam_unmapped_type 'fastq' --run_metagenomic_screening --database "${TRAVIS_BUILD_DIR}/databases/malt/" - # MALTEXTRACT Download NCBI db, ncbi map/tree, additional files and run test - - mkdir -p ${TRAVIS_BUILD_DIR}/databases/maltextract && for i in ncbi.tre ncbi.map; do wget https://github.com/rhuebler/HOPS/raw/external/Resources/"$i" -P ${TRAVIS_BUILD_DIR}/databases/maltextract/; done - - nextflow run ${TRAVIS_BUILD_DIR} "$TOWER" -name "$RUN_NAME-maltextract" -profile test,docker --pairedEnd --run_bam_filtering --bam_discard_unmapped --bam_unmapped_type 'fastq' --run_metagenomic_screening --metagenomic_tool 'malt' --database "${TRAVIS_BUILD_DIR}/databases/malt/" --run_maltextract --maltextract_ncbifiles "${TRAVIS_BUILD_DIR}/databases/maltextract/" --maltextract_taxon_list 'https://raw.githubusercontent.com/nf-core/test-datasets/eager/testdata/Mammoth/maltextract/MaltExtract_list.txt' - # SEXDETERMINAION: Run the basic pipeline with the bam input profile, but don't convert BAM, skip everything but sex determination - - nextflow run ${TRAVIS_BUILD_DIR} "$TOWER" -name "$RUN_NAME-sexdeterrmine" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping 
--skip_deduplication --skip_qualimap --singleEnd --skip_preseq --skip_damage_calculation --run_sexdeterrmine - # NUCLEAR CONTAMINATION ESTIMATION Premapped BAM run skipping most steps but run nuclear contamination with ANGSD - - nextflow run ${TRAVIS_BUILD_DIR} "$TOWER" -name "$RUN_NAME-nuclearcontamination" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --singleEnd --skip_preseq --skip_damage_calculation --run_nuclear_contamination - # MTNUCRATIO Run basic pipeline with bam input profile, but don't convert BAM, skip everything but nmtnucratio - - nextflow run ${TRAVIS_BUILD_DIR} "$TOWER" -name "$RUN_NAME-mtnucratio" -profile test_humanbam,docker --bam --skip_fastqc --skip_adapterremoval --skip_mapping --skip_deduplication --skip_qualimap --singleEnd --skip_preseq --skip_damage_calculation --run_mtnucratio - - diff --git a/CHANGELOG.md b/CHANGELOG.md index 1296d6b69..ba6816e8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. * Fancy new logo from [ZandraFagernas](https://github.com/ZandraFagernas) * [#286](https://github.com/nf-core/eager/issues/286) - Adds pipeline-specific profiles (loaded from nf-core configs) * [#310](https://github.com/nf-core/eager/issues/310) - Generalises base.config -* [#326](https://github.com/nf-core/eager/pull/326) - Add Biopython and [xopen](https://github.com/marcelm/xopen/) dependancies +* [#326](https://github.com/nf-core/eager/pull/326) - Add Biopython and [xopen](https://github.com/marcelm/xopen/) dependencies ### `Fixed` @@ -34,6 +34,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. 
* [#237](https://github.com/nf-core/eager/issues/237) - Fixed and Updated script scrape_software_versions * [#322](https://github.com/nf-core/eager/pull/322) - Move extract map reads fastq compression to pigz * [#327](https://github.com/nf-core/eager/pull/327) - Speed up strip_input_fastq process and make it more robust +* [#342](https://github.com/nf-core/eager/pull/342) - Updated to match nf-core tools 1.8 linting guidelines ### `Dependencies` diff --git a/Dockerfile b/Dockerfile index 7bd109a7d..93eaa979e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,7 @@ -FROM nfcore/base:1.7 +FROM nfcore/base:1.8 LABEL description="Docker image containing all requirements for nf-core/eager pipeline" COPY environment.yml / RUN conda env create -f /environment.yml && conda clean -a +RUN conda env export --name nf-core-eager-2.1.0dev > nf-core-eager-2.1.0dev.yml ENV PATH /opt/conda/envs/nf-core-eager-2.1.0dev/bin:$PATH diff --git a/README.md b/README.md index 3e0d73766..75a30d80b 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,7 @@ Additional functionality contained by the pipeline currently includes: 4. Test the pipeline using the provided test data - nextflow run nf-core/eager -profile ,test --pairedEnd + nextflow run nf-core/eager -profile ,test --paired_end 5. Start running your own ancient DNA analysis! 
diff --git a/conf/test.config b/conf/test.config index 244083be0..88c0a2d6b 100644 --- a/conf/test.config +++ b/conf/test.config @@ -16,7 +16,7 @@ params { max_time = 48.h genome = false //Input data - singleEnd = false + single_end = false readPaths = [['JK2782_TGGCCGATCAACGA_L008', ['https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2782_TGGCCGATCAACGA_L008_R1_001.fastq.gz.tengrand.fq.gz','https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2782_TGGCCGATCAACGA_L008_R2_001.fastq.gz.tengrand.fq.gz']], ['JK2802_AGAATAACCTACCA_L008', ['https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2802_AGAATAACCTACCA_L008_R1_001.fastq.gz.tengrand.fq.gz','https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2802_AGAATAACCTACCA_L008_R2_001.fastq.gz.tengrand.fq.gz']], ] diff --git a/conf/test_bam.config b/conf/test_bam.config index deb7bf8a8..1a2925ba9 100644 --- a/conf/test_bam.config +++ b/conf/test_bam.config @@ -16,7 +16,7 @@ params { max_time = 48.h genome = false //Input data - singleEnd = true + single_end = true readPaths = 'https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/bam/JK2782_TGGCCGATCAACGA_L008_R1_001.fastq.gz.tengrand.fq.combined.fq.mapped.bam' // Genome references fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/eager/reference/Mammoth/Mammoth_MT_Krause.fasta' diff --git a/conf/test_fna.config b/conf/test_fna.config index 5519df04c..aa661c4a9 100644 --- a/conf/test_fna.config +++ b/conf/test_fna.config @@ -16,7 +16,7 @@ params { max_time = 48.h genome = false //Input data - singleEnd = false + single_end = false readPaths = [['JK2782_TGGCCGATCAACGA_L008', ['https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2782_TGGCCGATCAACGA_L008_R1_001.fastq.gz.tengrand.fq.gz','https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2782_TGGCCGATCAACGA_L008_R2_001.fastq.gz.tengrand.fq.gz']], 
['JK2802_AGAATAACCTACCA_L008', ['https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2802_AGAATAACCTACCA_L008_R1_001.fastq.gz.tengrand.fq.gz','https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2802_AGAATAACCTACCA_L008_R2_001.fastq.gz.tengrand.fq.gz']], ] diff --git a/conf/test_humanbam.config b/conf/test_humanbam.config index e9b3059a4..c65be53d6 100644 --- a/conf/test_humanbam.config +++ b/conf/test_humanbam.config @@ -16,7 +16,7 @@ params { max_time = 48.h genome = false //Input data - singleEnd = true + single_end = true reads = 'https://github.com/nf-core/test-datasets/raw/eager/testdata/Human/bam/JK2067_downsampled_s0.1.bam' // Genome references fasta = 'https://raw.githubusercontent.com/nf-core/test-datasets/eager/reference/Mammoth/Mammoth_MT_Krause.fasta' diff --git a/conf/test_pretrim.config b/conf/test_pretrim.config index 68de624ff..3632e63ea 100644 --- a/conf/test_pretrim.config +++ b/conf/test_pretrim.config @@ -16,7 +16,7 @@ params { max_time = 48.h genome = false //Input data - singleEnd = false + single_end = false readPaths = [['JK2782_TGGCCGATCAACGA_L008', ['https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2782_TGGCCGATCAACGA_L008_R1_001.fastq.gz.tengrand.fq.pair1.truncated.fq.gz','https://github.com/nf-core/test-datasets/raw/eager/testdata/Mammoth/fastq/JK2782_TGGCCGATCAACGA_L008_R1_001.fastq.gz.tengrand.fq.pair2.truncated.fq.gz']], ] // Genome references diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index d390c07f7..01a73a255 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -25,7 +25,7 @@ If no file, only one input file, or only read one and not read two is picked up 1. The path must be enclosed in quotes (`'` or `"`) 2. The path must have at least one `*` wildcard character. This is even if you are only running one paired end sample. 3. 
When using the pipeline with paired end data, the path must use `{1,2}` or `{R1,R2}` notation to specify read pairs. -4. If you are running Single end data make sure to specify `--singleEnd` +4. If you are running Single end data make sure to specify `--single_end` If the pipeline can't find your files then you will get the following error diff --git a/docs/usage.md b/docs/usage.md index 389d54133..18629480c 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -184,37 +184,37 @@ If left unspecified, a default pattern is used: `data/*{1,2}.fastq.gz` **Note**: It is not possible to run a mixture of single-end and paired-end files in one run. -#### `--singleEnd` +#### `--single_end` -If you have single-end data or BAM files, you need to specify `--singleEnd` on the command line when you launch the pipeline. A normal glob pattern, enclosed in quotation marks, can then be used for `--reads`. For example: +If you have single-end data or BAM files, you need to specify `--single_end` on the command line when you launch the pipeline. A normal glob pattern, enclosed in quotation marks, can then be used for `--reads`. For example: ```bash ---singleEnd --reads 'path/to/data/*.fastq' +--single_end --reads 'path/to/data/*.fastq' ``` for a single sample, or ```bash ---singleEnd --reads 'path/to/data/*/*.fastq' +--single_end --reads 'path/to/data/*/*.fastq' ``` for multiple samples, where each sample's FASTQs are in it's own directory (indicated by the first `*`) **Note**: It is not possible to run a mixture of single-end and paired-end files in one run. -#### `--pairedEnd` +#### `--paired_end` -If you have paired-end data, you need to specify `--pairedEnd` on the command line when you launc hthe pipeline. +If you have paired-end data, you need to specify `--paired_end` on the command line when you launch the pipeline. A normal glob pattern, enclosed in quotation marks, can then be used for `--reads`.
For example: ```bash ---pairedEnd --reads '*.fastq' +--paired_end --reads '*.fastq' ``` #### `--bam` -Specifies the input file type to `--reads` is in BAM format. This is only valid in combination with `--singleEnd`. +Specifies the input file type to `--reads` is in BAM format. This is only valid in combination with `--single_end`. #### `--fasta` @@ -299,7 +299,7 @@ For example: ```bash nextflow run nf-core/eager \ -profile test,docker \ ---pairedEnd \ +--paired_end \ --reads '*{R1,R2}*.fq.gz' --fasta 'results/reference_genome/bwa_index/BWAIndex/Mammoth_MT_Krause.fasta' \ --bwa_index 'results/reference_genome/bwa_index/BWAIndex/Mammoth_MT_Krause.fasta' @@ -491,7 +491,7 @@ Turns off the paired-end read merging. For example ```bash ---pairedEnd --skip_collapse --reads '*.fastq' +--paired_end --skip_collapse --reads '*.fastq' ``` #### `--skip_trim` @@ -501,7 +501,7 @@ Turns off adaptor and quality trimming. For example: ```bash ---pairedEnd --skip_trim --reads '*.fastq' +--paired_end --skip_trim --reads '*.fastq' ``` #### `--preserve5p` diff --git a/main.nf b/main.nf index 4adbd312e..4adc494fc 100644 --- a/main.nf +++ b/main.nf @@ -29,8 +29,8 @@ def helpMessage() { Mandatory arguments: --reads Path to input data (must be surrounded with quotes). For paired end data, the path must use '{1,2}' notation to specify read pairs -profile Institution or personal hardware config to use (e.g. standard, docker, singularity, conda, aws). Ask your system admin if unsure, or check documentation - --singleEnd Specifies that the input is single end reads (required if not pairedEnd) - --pairedEnd Specifies that the input is paired end reads (required if not singleEnd) + --single_end Specifies that the input is single end reads (required if not paired_end) + --paired_end Specifies that the input is paired end reads (required if not single_end) --bam Specifies that the input is in BAM format --fasta Path and name of FASTA reference file (required if not iGenome reference). 
File suffixes can be: '.fa', '.fn', '.fna', '.fasta' --genome Name of iGenomes reference (required if not fasta reference) @@ -297,8 +297,8 @@ if (params.bam && !params.run_convertbam && !params.skip_adapterremoval ) { } // Validate BAM is single end only -if (params.bam && !params.singleEnd){ - exit 1, "BAM input must be used with --singleEnd " +if (params.bam && !params.single_end){ + exit 1, "BAM input must be used with --single_end " } // Validate that you're not trying to pass FASTQs to BAM only processes @@ -311,15 +311,15 @@ if (params.bam && !params.run_convertbam && !params.skip_mapping) { exit 1, "You can't directly map a BAM file! Please supply the --run_convertbam parameter!" } -// Validate that either pairedEnd or singleEnd has been specified by the user! -if( params.singleEnd || params.pairedEnd || params.bam){ +// Validate that either paired_end or single_end has been specified by the user! +if( params.single_end || params.paired_end || params.bam){ } else { - exit 1, "Please specify either --singleEnd, --pairedEnd to execute the pipeline on FastQ files and --bam for previously processed BAM files!" + exit 1, "Please specify either --single_end, --paired_end to execute the pipeline on FastQ files and --bam for previously processed BAM files!" } -// Validate that skip_collapse is only set to True for pairedEnd reads! -if (params.skip_collapse && params.singleEnd){ - exit 1, "--skip_collapse can only be set for pairedEnd samples!" +// Validate that skip_collapse is only set to True for paired_end reads! +if (params.skip_collapse && params.single_end){ + exit 1, "--skip_collapse can only be set for paired_end samples!" 
} // Strip mode sanity checking @@ -502,7 +502,7 @@ if( workflow.profile == 'awsbatch') { // is NOT read paths && BAM if( params.readPaths ){ - if( params.singleEnd && !params.bam) { + if( params.single_end && !params.bam) { Channel .from( params.readPaths ) .filter { it =~/.*.fastq.gz|.*.fq.gz|.*.fastq|.*.fq/ } @@ -532,10 +532,10 @@ if( params.readPaths ){ } } else if (!params.bam){ Channel - .fromFilePairs( params.reads, size: params.singleEnd ? 1 : 2 ) + .fromFilePairs( params.reads, size: params.single_end ? 1 : 2 ) .filter { it =~/.*.fastq.gz|.*.fq.gz|.*.fastq|.*.fq/ } .ifEmpty { exit 1, "Cannot find any reads matching: ${params.reads}\nNB: Path needs " + - "to be enclosed in quotes!\nNB: Path requires at least one * wildcard!\nValid input file types: .fastq.gz', '.fq.gz', '.fastq', or '.fq'\nIf this is single-end data, please specify --singleEnd on the command line." } + "to be enclosed in quotes!\nNB: Path requires at least one * wildcard!\nValid input file types: .fastq.gz', '.fq.gz', '.fastq', or '.fq'\nIf this is single-end data, please specify --single_end on the command line." } .into { ch_input_for_skipconvertbam; ch_input_for_convertbam; ch_input_for_indexbam } } else { @@ -560,7 +560,7 @@ summary['Reads'] = params.reads summary['Fasta Ref'] = params.fasta summary['BAM Index Type'] = (params.large_ref == "") ? 'BAI' : 'CSI' if(params.bwa_index) summary['BWA Index'] = params.bwa_index -summary['Data Type'] = params.singleEnd ? 'Single-End' : 'Paired-End' +summary['Data Type'] = params.single_end ? 'Single-End' : 'Paired-End' summary['Skipping FASTQC?'] = params.skip_fastqc ? 'Yes' : 'No' summary['Skipping AdapterRemoval?'] = params.skip_adapterremoval ? 
'Yes' : 'No' if (!params.skip_adapterremoval) { @@ -876,7 +876,7 @@ process fastp { file("*.json") into ch_fastp_for_multiqc script: - if(params.singleEnd){ + if(params.single_end){ """ fastp --in1 ${reads[0]} --out1 "${reads[0].baseName}.pG.fq.gz" -A -g --poly_g_min_len "${params.complexity_filter_poly_g_min}" -Q -L -w ${task.cpus} --json "${reads[0].baseName}"_fastp.json """ @@ -927,7 +927,7 @@ process adapter_removal { mergedonly = params.mergedonly ? "Y" : "N" //PE mode, dependent on trim_me and collapse_me the respective procedure is run or not :-) - if (!params.singleEnd && !params.skip_collapse && !params.skip_trim){ + if (!params.single_end && !params.skip_collapse && !params.skip_trim){ """ mkdir -p output AdapterRemoval --file1 ${reads[0]} --file2 ${reads[1]} --basename ${base} ${trim_me} --gzip --threads ${task.cpus} ${collapse_me} ${preserve5p} @@ -946,14 +946,14 @@ process adapter_removal { mv *.settings output/ """ //PE, don't collapse, but trim reads - } else if (!params.singleEnd && params.skip_collapse && !params.skip_trim) { + } else if (!params.single_end && params.skip_collapse && !params.skip_trim) { """ mkdir -p output AdapterRemoval --file1 ${reads[0]} --file2 ${reads[1]} --basename ${base} --gzip --threads ${task.cpus} ${trim_me} ${collapse_me} ${preserve5p} mv *.settings ${base}.pair*.truncated.gz output/ """ //PE, collapse, but don't trim reads - } else if (!params.singleEnd && !params.skip_collapse && params.skip_trim) { + } else if (!params.single_end && !params.skip_collapse && params.skip_trim) { """ mkdir -p output AdapterRemoval --file1 ${reads[0]} --file2 ${reads[1]} --basename ${base} --gzip --threads ${task.cpus} --basename ${base} ${collapse_me} ${trim_me} @@ -1039,7 +1039,7 @@ process bwa { fasta = "${index}/${bwa_base}" //PE data without merging, PE data without any AR applied - if (!params.singleEnd && (params.skip_collapse || params.skip_adapterremoval)){ + if (!params.single_end && (params.skip_collapse || 
params.skip_adapterremoval)){ prefix = "${reads[0].baseName}" """ bwa aln -t ${task.cpus} $fasta ${reads[0]} -n ${params.bwaalnn} -l ${params.bwaalnl} -k ${params.bwaalnk} -f ${prefix}.r1.sai @@ -1109,7 +1109,7 @@ process circularmapper{ size = "${params.large_ref}" ? '-c' : '' - if (!params.singleEnd && params.skip_collapse ){ + if (!params.single_end && params.skip_collapse ){ prefix = "${reads[0].baseName}" """ bwa aln -t ${task.cpus} $elongated_root ${reads[0]} -n ${params.bwaalnn} -l ${params.bwaalnl} -k ${params.bwaalnk} -f ${prefix}.r1.sai @@ -1153,7 +1153,7 @@ process bwamem { prefix = "${reads[0].baseName}" size = "${params.large_ref}" ? '-c' : '' - if (!params.singleEnd && params.skip_collapse){ + if (!params.single_end && params.skip_collapse){ """ bwa mem -t ${task.cpus} $fasta ${reads[0]} ${reads[1]} -R "@RG\\tID:ILLUMINA-${prefix}\\tSM:${prefix}\\tPL:illumina" | samtools sort -@ ${task.cpus} -O bam - > "${prefix}".mapped.bam samtools index "${size}" -@ ${task.cpus} "${prefix}".mapped.bam @@ -1317,7 +1317,7 @@ process strip_input_fastq { script: - if (params.singleEnd) { + if (params.single_end) { out_fwd = bam.baseName+'.stripped.fq.gz' """ samtools index $bam @@ -1388,7 +1388,7 @@ process dedup{ treat_merged="${params.dedup_all_merged}" ? '-m' : '' size = "${params.large_ref}" ? '-c' : '' - if(params.singleEnd) { + if(params.single_end) { """ dedup -i $bam $treat_merged -o . 
-u mv *.log dedup.log diff --git a/nextflow.config b/nextflow.config index 87c8635f1..cd0544708 100644 --- a/nextflow.config +++ b/nextflow.config @@ -8,8 +8,8 @@ params { //Pipeline options //Input reads - singleEnd = false - pairedEnd = false + single_end = false + paired_end = false reads = "data/*{1,2}.fastq.gz" readPaths = false bam = false @@ -204,7 +204,7 @@ params { tracedir = "${params.outdir}/pipeline_info" awsqueue = false awsregion = 'eu-west-1' - igenomesIgnore = false + igenomes_ignore = false custom_config_version = 'master' custom_config_base = "https://raw.githubusercontent.com/nf-core/configs/${params.custom_config_version}" hostnames = false @@ -252,7 +252,7 @@ profiles { test_pretrim { includeConfig 'conf/test_pretrim.config' } } // Load igenomes.config if required -if(!params.igenomesIgnore){ +if(!params.igenomes_ignore){ includeConfig 'conf/igenomes.config' } // Capture exit codes from upstream processes when piping