diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 099c79b9..3b1b4219 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -48,9 +48,9 @@ These tests are run both with the latest available version of `Nextflow` and als :warning: Only in the unlikely and regretful event of a release happening with a bug. -- On your own fork, make a new branch `patch` based on `upstream/master`. -- Fix the bug, and bump version (X.Y.Z+1). -- A PR should be made on `master` from patch to directly this particular bug. +- On your own fork, make a new branch `patch` based on `upstream/master`. +- Fix the bug, and bump version (X.Y.Z+1). +- A PR should be made on `master` from patch to directly this particular bug. ## Getting help @@ -91,8 +91,8 @@ The process resources can be passed on to the tool dynamically within the proces Please use the following naming schemes, to make it easy to understand what is going where. -- initial process channel: `ch_output_from_` -- intermediate and terminal channels: `ch__for_` +- initial process channel: `ch_output_from_` +- intermediate and terminal channels: `ch__for_` ### Nextflow version bumping diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4b79f9bf..8e2c98da 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -13,13 +13,13 @@ Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/scrn ## PR checklist -- [ ] This comment contains a description of changes (with reason). -- [ ] If you've fixed a bug or added code that should be tested, add tests! - - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/scrnaseq/tree/master/.github/CONTRIBUTING.md) - - [ ] If necessary, also make a PR on the nf-core/scrnaseq _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. -- [ ] Make sure your code lints (`nf-core lint`). -- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir `). -- [ ] Usage Documentation in `docs/usage.md` is updated. -- [ ] Output Documentation in `docs/output.md` is updated. -- [ ] `CHANGELOG.md` is updated. -- [ ] `README.md` is updated (including new tool citations and authors/contributors). +- [ ] This comment contains a description of changes (with reason). +- [ ] If you've fixed a bug or added code that should be tested, add tests! + - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/scrnaseq/tree/master/.github/CONTRIBUTING.md) + - [ ] If necessary, also make a PR on the nf-core/scrnaseq _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. +- [ ] Make sure your code lints (`nf-core lint`). +- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir `). +- [ ] Usage Documentation in `docs/usage.md` is updated. +- [ ] Output Documentation in `docs/output.md` is updated. +- [ ] `CHANGELOG.md` is updated. +- [ ] `README.md` is updated (including new tool citations and authors/contributors). 
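For illustration, a minimal Nextflow sketch of the channel-naming scheme described in the CONTRIBUTING.md hunk above (initial process channel `ch_output_from_<process>`, intermediate/terminal channel `ch_<process>_for_<next_process>`); the `FASTQC`/`MULTIQC` names are placeholders, not a statement about the actual pipeline modules:

```nextflow
// Inside a workflow block; process names are illustrative only.

// Initial process channel: ch_output_from_<process>
ch_output_from_fastqc = FASTQC.out.zip

// Intermediate/terminal channel: ch_<process>_for_<next_process>
ch_fastqc_for_multiqc = ch_output_from_fastqc.collect()
MULTIQC(ch_fastqc_for_multiqc)
```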
diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml index 01f9d0ab..a274eedb 100644 --- a/.github/workflows/awsfulltest.yml +++ b/.github/workflows/awsfulltest.yml @@ -18,7 +18,7 @@ jobs: # TODO nf-core: You can customise AWS full pipeline tests as required # Add full size test data (but still relatively small datasets for few samples) # on the `test_full.config` test runs with only one set of parameters - + with: workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} @@ -31,4 +31,4 @@ jobs: "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/scrnaseq/results-${{ github.sha }}" } profiles: test_full,aws_tower - pre_run_script: 'export NXF_VER=21.10.3' + pre_run_script: "export NXF_VER=21.10.3" diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml index 49f880fe..ddcb32f5 100644 --- a/.github/workflows/awstest.yml +++ b/.github/workflows/awstest.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Launch workflow via tower uses: nf-core/tower-action@v2 - + with: workspace_id: ${{ secrets.TOWER_WORKSPACE_ID }} access_token: ${{ secrets.TOWER_ACCESS_TOKEN }} @@ -25,4 +25,4 @@ jobs: "outdir": "s3://${{ secrets.AWS_S3_BUCKET }}/scrnaseq/results-test-${{ github.sha }}" } profiles: test,aws_tower - pre_run_script: 'export NXF_VER=21.10.3' + pre_run_script: "export NXF_VER=21.10.3" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d64a3899..10cfd1ce 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,17 +23,17 @@ jobs: # Nextflow versions include: # Test pipeline minimum Nextflow version - - NXF_VER: '21.10.3' - NXF_EDGE: '' + - NXF_VER: "21.10.3" + NXF_EDGE: "" # Test latest edge release of Nextflow - - NXF_VER: '' - NXF_EDGE: '1' + - NXF_VER: "" + NXF_EDGE: "1" profile: [ "test,docker --aligner alevin", "test,docker --aligner kallisto", "test,docker --aligner star", - "test,docker --aligner cellranger" + "test,docker --aligner cellranger", ] steps: - name: Check out pipeline code diff --git a/CHANGELOG.md b/CHANGELOG.md index 806a2496..8eabdc1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,29 +5,29 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## v2.0dev - -* Pipeline ported to dsl2 -* Template update with latest nf-core/tools v2.1 +- Pipeline ported to dsl2 +- Template update with latest nf-core/tools v2.1 ### Fixes -* Make sure pipeline runs on multiple samples [#77](https://github.com/nf-core/scrnaseq/pull/77) +- Make sure pipeline runs on multiple samples [#77](https://github.com/nf-core/scrnaseq/pull/77) ## v1.1.0 - 2021-03-24 "Olive Mercury Corgi" -* Template update with latest nf-core/tools v1.13.2 -* Parameters JSON Schema added [#42](https://github.com/nf-core/scrnaseq/issues/42) -* [25](https://github.com/nf-core/scrnaseq/issues/25) Fix small documentation error with wrong parameter for txp2gene +- Template update with latest nf-core/tools v1.13.2 +- Parameters JSON Schema added [#42](https://github.com/nf-core/scrnaseq/issues/42) +- [25](https://github.com/nf-core/scrnaseq/issues/25) Fix small documentation error with wrong parameter for txp2gene ### Fixes -* [#20](https://github.com/nf-core/scrnaseq/issues/20) Fix Transcriptome Fasta argument not detected well -* [#21](https://github.com/nf-core/scrnaseq/issues/21) Fix `--kallisto_index` being ignored +- [#20](https://github.com/nf-core/scrnaseq/issues/20) Fix Transcriptome Fasta argument not detected well +- [#21](https://github.com/nf-core/scrnaseq/issues/21) Fix 
`--kallisto_index` being ignored ## v1.0.0 - 2019-11-28 "Tiny Aluminium Crab" Initial release of nf-core/scrnaseq, created with the [nf-core](http://nf-co.re/) template. This includes the following workflow options: -* Salmon Alevin + AlevinQC -* STARSolo -* Kallisto / BUStools +- Salmon Alevin + AlevinQC +- STARSolo +- Kallisto / BUStools diff --git a/CITATIONS.md b/CITATIONS.md index 05e916e6..783a1166 100644 --- a/CITATIONS.md +++ b/CITATIONS.md @@ -10,35 +10,42 @@ ## Pipeline tools -* [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) +- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) + +- [MultiQC](https://pubmed.ncbi.nlm.nih.gov/27312411/) -* [MultiQC](https://pubmed.ncbi.nlm.nih.gov/27312411/) > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924. -* [Alevin](https://doi.org/10.1186/s13059-019-1670-y) +- [Alevin](https://doi.org/10.1186/s13059-019-1670-y) + > Srivastava, A., Malik, L., Smith, T. et al. Alevin efficiently estimates accurate gene abundances from dscRNA-seq data. Genome Biol 20, 65 (2019). -* [Salmon](https://www.nature.com/articles/nmeth.4197) +- [Salmon](https://www.nature.com/articles/nmeth.4197) + > Patro, R., Duggal, G., Love, M. et al. Salmon provides fast and bias-aware quantification of transcript expression. Nat Methods 14, 417–419 (2017). -* [Kallisto/Bustools](https://www.nature.com/articles/s41587-021-00870-2) +- [Kallisto/Bustools](https://www.nature.com/articles/s41587-021-00870-2) + > Melsted, P., Booeshaghi, A.S., Liu, L. et al. Modular, efficient and constant-memory single-cell RNA-seq preprocessing. Nat Biotechnol 39, 813–818 (2021). -* [StarSolo](https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1) +- [StarSolo](https://www.biorxiv.org/content/10.1101/2021.05.05.442755v1) > Benjamin Kaminow, Dinar Yunusov, Alexander Dobin. STARsolo: accurate, fast and versatile mapping/quantification of single-cell and single-nucleus RNA-seq data. BioRxiv 2021.05.05.442755 (2021). ## Software packaging/containerisation tools -* [Anaconda](https://anaconda.com) +- [Anaconda](https://anaconda.com) + > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web. -* [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/) +- [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/) + > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506. -* [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/) +- [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/) + > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671. 
-* [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) +- [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) -* [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) +- [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675. diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index f4fd052f..a47b923c 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -4,19 +4,19 @@ In the interest of fostering an open, collaborative, and welcoming environment, we as contributors and maintainers of nf-core, pledge to making participation in our projects and community a harassment-free experience for everyone, regardless of: -- Age -- Body size -- Familial status -- Gender identity and expression -- Geographical location -- Level of experience -- Nationality and national origins -- Native language -- Physical and neurological ability -- Race or ethnicity -- Religion -- Sexual identity and orientation -- Socioeconomic status +- Age +- Body size +- Familial status +- Gender identity and expression +- Geographical location +- Level of experience +- Nationality and national origins +- Native language +- Physical and neurological ability +- Race or ethnicity +- Religion +- Sexual identity and orientation +- Socioeconomic status Please note that the list above is alphabetised and is therefore not ranked in any order of preference or importance. @@ -48,38 +48,38 @@ Members of the core team or the safety officer who violate the CoC will be requi Participation in the nf-core community is contingent on following these guidelines in all our workspaces and events. This includes but is not limited to the following listed alphabetically and therefore in no order of preference: -- Communicating with an official project email address. -- Communicating with community members within the nf-core Slack channel. -- Participating in hackathons organised by nf-core (both online and in-person events). -- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence. -- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, Jitsi, YouTube live etc. -- Representing nf-core on social media. This includes both official and personal accounts. +- Communicating with an official project email address. +- Communicating with community members within the nf-core Slack channel. +- Participating in hackathons organised by nf-core (both online and in-person events). +- Participating in collaborative work on GitHub, Google Suite, community calls, mentorship meetings, email correspondence. +- Participating in workshops, training, and seminar series organised by nf-core (both online and in-person events). This applies to events hosted on web-based platforms such as Zoom, Jitsi, YouTube live etc. +- Representing nf-core on social media. This includes both official and personal accounts. 
## nf-core cares 😊 nf-core's CoC and expectations of respectful behaviours for all participants (including organisers and the nf-core team) include but are not limited to the following (listed in alphabetical order): -- Ask for consent before sharing another community member’s personal information (including photographs) on social media. -- Be respectful of differing viewpoints and experiences. We are all here to learn from one another and a difference in opinion can present a good learning opportunity. -- Celebrate your accomplishments at events! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !) -- Demonstrate empathy towards other community members. (We don’t all have the same amount of time to dedicate to nf-core. If tasks are pending, don’t hesitate to gently remind members of your team. If you are leading a task, ask for help if you feel overwhelmed.) -- Engage with and enquire after others. (This is especially important given the geographically remote nature of the nf-core community, so let’s do this the best we can) -- Focus on what is best for the team and the community. (When in doubt, ask) -- Graciously accept constructive criticism, yet be unafraid to question, deliberate, and learn. -- Introduce yourself to members of the community. (We’ve all been outsiders and we know that talking to strangers can be hard for some, but remember we’re interested in getting to know you and your visions for open science!) -- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communications to be kind.**) -- Take breaks when you feel like you need them. -- Using welcoming and inclusive language. (Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack.) +- Ask for consent before sharing another community member’s personal information (including photographs) on social media. +- Be respectful of differing viewpoints and experiences. We are all here to learn from one another and a difference in opinion can present a good learning opportunity. +- Celebrate your accomplishments at events! (Get creative with your use of emojis 🎉 🥳 💯 🙌 !) +- Demonstrate empathy towards other community members. (We don’t all have the same amount of time to dedicate to nf-core. If tasks are pending, don’t hesitate to gently remind members of your team. If you are leading a task, ask for help if you feel overwhelmed.) +- Engage with and enquire after others. (This is especially important given the geographically remote nature of the nf-core community, so let’s do this the best we can) +- Focus on what is best for the team and the community. (When in doubt, ask) +- Graciously accept constructive criticism, yet be unafraid to question, deliberate, and learn. +- Introduce yourself to members of the community. (We’ve all been outsiders and we know that talking to strangers can be hard for some, but remember we’re interested in getting to know you and your visions for open science!) +- Show appreciation and **provide clear feedback**. (This is especially important because we don’t see each other in person and it can be harder to interpret subtleties. Also remember that not everyone understands a certain language to the same extent as you do, so **be clear in your communications to be kind.**) +- Take breaks when you feel like you need them. 
+- Using welcoming and inclusive language. (Participants are encouraged to display their chosen pronouns on Zoom or in communication on Slack.) ## nf-core frowns on 😕 The following behaviours from any participants within the nf-core community (including the organisers) will be considered unacceptable under this code of conduct. Engaging or advocating for any of the following could result in expulsion from nf-core workspaces. -- Deliberate intimidation, stalking or following and sustained disruption of communication among participants of the community. This includes hijacking shared screens through actions such as using the annotate tool in conferencing software such as Zoom. -- “Doxing” i.e. posting (or threatening to post) another person’s personal identifying information online. -- Spamming or trolling of individuals on social media. -- Use of sexual or discriminatory imagery, comments, or jokes and unwelcome sexual attention. -- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion or work experience. +- Deliberate intimidation, stalking or following and sustained disruption of communication among participants of the community. This includes hijacking shared screens through actions such as using the annotate tool in conferencing software such as Zoom. +- “Doxing” i.e. posting (or threatening to post) another person’s personal identifying information online. +- Spamming or trolling of individuals on social media. +- Use of sexual or discriminatory imagery, comments, or jokes and unwelcome sexual attention. +- Verbal and text comments that reinforce social structures of domination related to gender, gender identity and expression, sexual orientation, ability, physical appearance, body size, race, age, religion or work experience. ### Online Trolling @@ -99,13 +99,13 @@ All reports will be handled with utmost discretion and confidentially. ## Attribution and Acknowledgements -- The [Contributor Covenant, version 1.4](http://contributor-covenant.org/version/1/4) -- The [OpenCon 2017 Code of Conduct](http://www.opencon2017.org/code_of_conduct) (CC BY 4.0 OpenCon organisers, SPARC and Right to Research Coalition) -- The [eLife innovation sprint 2020 Code of Conduct](https://sprint.elifesciences.org/code-of-conduct/) -- The [Mozilla Community Participation Guidelines v3.1](https://www.mozilla.org/en-US/about/governance/policies/participation/) (version 3.1, CC BY-SA 3.0 Mozilla) +- The [Contributor Covenant, version 1.4](http://contributor-covenant.org/version/1/4) +- The [OpenCon 2017 Code of Conduct](http://www.opencon2017.org/code_of_conduct) (CC BY 4.0 OpenCon organisers, SPARC and Right to Research Coalition) +- The [eLife innovation sprint 2020 Code of Conduct](https://sprint.elifesciences.org/code-of-conduct/) +- The [Mozilla Community Participation Guidelines v3.1](https://www.mozilla.org/en-US/about/governance/policies/participation/) (version 3.1, CC BY-SA 3.0 Mozilla) ## Changelog ### v1.0 - March 12th, 2021 -- Complete rewrite from original [Contributor Covenant](http://contributor-covenant.org/) CoC. +- Complete rewrite from original [Contributor Covenant](http://contributor-covenant.org/) CoC. 
diff --git a/README.md b/README.md index 67724a3f..5ccfa0e7 100644 --- a/README.md +++ b/README.md @@ -19,20 +19,22 @@ ## Introduction + **nf-core/scrnaseq** is a bioinformatics best-practice analysis pipeline for processing 10x Genomics single-cell RNA-seq data. The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community! + On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/scrnaseq/results). ## Pipeline summary This is a community effort in building a pipeline capable to support: -* Alevin + AlevinQC -* STARSolo -* Kallisto + BUStools +- Alevin + AlevinQC +- STARSolo +- Kallisto + BUStools ## Documentation @@ -52,10 +54,10 @@ The nf-core/scrnaseq pipeline comes with documentation about the pipeline [usage Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string. - > * The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`. - > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. - > * If you are using `singularity` and are persistently observing issues downloading Singularity images directly due to timeout or network issues, then you can use the `--singularity_pull_docker_container` parameter to pull and convert the Docker image instead. Alternatively, you can use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs. 
- > * If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs. + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`. + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + > - If you are using `singularity` and are persistently observing issues downloading Singularity images directly due to timeout or network issues, then you can use the `--singularity_pull_docker_container` parameter to pull and convert the Docker image instead. Alternatively, you can use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs. + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs. 4. Start running your own analysis! @@ -69,9 +71,9 @@ The `nf-core/scrnaseq` was initiated by [Peter J. Bailey](https://github.com/Pet We thank the following people for their extensive assistance in the development of this pipeline: -* @KevinMenden -* @ggabernet -* @FloWuenne +- @KevinMenden +- @ggabernet +- @FloWuenne ## Contributions and Support @@ -81,7 +83,7 @@ For further information or help, don't hesitate to get in touch on the [Slack `# ## Citations -If you use nf-core/scrnaseq for your analysis, please cite it using the following doi: [10.5281/zenodo.3568187](https://doi.org/10.5281/10.5281/zenodo.3568187) +If you use nf-core/scrnaseq for your analysis, please cite it using the following doi: [10.5281/zenodo.3568187](https://doi.org/10.5281/10.5281/zenodo.3568187) The basic benchmarks that were used as motivation for incorporating the three available modular workflows can be found in [this publication](https://www.biorxiv.org/content/10.1101/673285v2). diff --git a/assets/email_template.html b/assets/email_template.html index b5c9a7b9..a3f377ab 100644 --- a/assets/email_template.html +++ b/assets/email_template.html @@ -1,53 +1,113 @@ - - - - + + + + - - nf-core/scrnaseq Pipeline Report - - -
[assets/email_template.html: whitespace/quoting reformat of the HTML pipeline-report e-mail template; the report content is unchanged and still shows the pipeline version, run name, a success/failure banner (with exit status and full error report on failure), completion time and duration, the launch command, the pipeline configuration summary table, and the nf-core/scrnaseq links.]
+ diff --git a/assets/multiqc_config.yaml b/assets/multiqc_config.yaml index 2d214243..a9f2585c 100644 --- a/assets/multiqc_config.yaml +++ b/assets/multiqc_config.yaml @@ -1,11 +1,11 @@ report_comment: > - This report has been generated by the nf-core/scrnaseq - analysis pipeline. For information about how to interpret these results, please see the - documentation. + This report has been generated by the nf-core/scrnaseq + analysis pipeline. For information about how to interpret these results, please see the + documentation. report_section_order: - software_versions: - order: -1000 - nf-core-scrnaseq-summary: - order: -1001 + software_versions: + order: -1000 + nf-core-scrnaseq-summary: + order: -1001 export_plots: true diff --git a/conf/multiqc_config.yaml b/conf/multiqc_config.yaml index 751e09cc..7bcca017 100644 --- a/conf/multiqc_config.yaml +++ b/conf/multiqc_config.yaml @@ -1,7 +1,7 @@ report_comment: > - This report has been generated by the nf-core/scrnaseq - analysis pipeline. For information about how to interpret these results, please see the - documentation. + This report has been generated by the nf-core/scrnaseq + analysis pipeline. For information about how to interpret these results, please see the + documentation. report_section_order: - nf-core/scrnaseq-software-versions: - order: -1000 + nf-core/scrnaseq-software-versions: + order: -1000 diff --git a/docs/README.md b/docs/README.md index ddc942b3..6b09a1ff 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,9 +2,9 @@ The nf-core/scrnaseq documentation is split into the following pages: -- [Usage](usage.md) - - An overview of how the pipeline works, how to run it and a description of all of the different command-line flags. -- [Output](output.md) - - An overview of the different results produced by the pipeline and how to interpret them. +- [Usage](usage.md) + - An overview of how the pipeline works, how to run it and a description of all of the different command-line flags. +- [Output](output.md) + - An overview of the different results produced by the pipeline and how to interpret them. You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re) diff --git a/docs/output.md b/docs/output.md index 0042d0ea..a9d454c3 100644 --- a/docs/output.md +++ b/docs/output.md @@ -8,16 +8,16 @@ This document describes the output produced by the pipeline. 
Most of the plots a The pipeline is built using [Nextflow](https://www.nextflow.io/) and processes data using the following steps: -* [nf-core/scrnaseq: Output](#nf-corescrnaseq-output) - * [:warning: Please read this documentation on the nf-core website: https://nf-co.re/scrnaseq/output](#warning-please-read-this-documentation-on-the-nf-core-website-httpsnf-corescrnaseqoutput) - * [Introduction](#introduction) - * [Pipeline overview](#pipeline-overview) - * [Kallisto & Bustools Results](#kallisto--bustools-results) - * [STARsolo](#starsolo) - * [Salmon Alevin & AlevinQC](#salmon-alevin--alevinqc) - * [Other output data](#other-output-data) - * [MultiQC](#multiqc) - * [Pipeline information](#pipeline-information) +- [nf-core/scrnaseq: Output](#nf-corescrnaseq-output) + - [:warning: Please read this documentation on the nf-core website: https://nf-co.re/scrnaseq/output](#warning-please-read-this-documentation-on-the-nf-core-website-httpsnf-corescrnaseqoutput) + - [Introduction](#introduction) + - [Pipeline overview](#pipeline-overview) + - [Kallisto & Bustools Results](#kallisto--bustools-results) + - [STARsolo](#starsolo) + - [Salmon Alevin & AlevinQC](#salmon-alevin--alevinqc) + - [Other output data](#other-output-data) + - [MultiQC](#multiqc) + - [Pipeline information](#pipeline-information) ## Kallisto & Bustools Results @@ -27,72 +27,71 @@ The pipeline can analyze data from single cell rnaseq experiments and generates **Output directory: `results/kallisto`** -* `raw_bus` - * Contains the unconverted BUS formatted pseudo aligned data -* `sort_bus` - * Contains the same BUS formatted data, sorted and corrected with the supplied barcode whitelist -* `kallisto_gene_map` - * Contains the converted GTF gene map that is used by BUSTools for downstream analysis -* `bustools_counts` - * Contains two subdirectories - * `eqcount`: Containing the Transcript Compatibility Count (TCC) Matrix (`tcc.mtx`) - * `genecount`: Containing the Gene Count Matrix (`gene.mtx`) -* `bustools_metrics` - * Contains the JSON metrics generated by BUStools +- `raw_bus` + - Contains the unconverted BUS formatted pseudo aligned data +- `sort_bus` + - Contains the same BUS formatted data, sorted and corrected with the supplied barcode whitelist +- `kallisto_gene_map` + - Contains the converted GTF gene map that is used by BUSTools for downstream analysis +- `bustools_counts` + - Contains two subdirectories + - `eqcount`: Containing the Transcript Compatibility Count (TCC) Matrix (`tcc.mtx`) + - `genecount`: Containing the Gene Count Matrix (`gene.mtx`) +- `bustools_metrics` \* Contains the JSON metrics generated by BUStools For details on how to load these into R and perform further downstream analysis, please refer to the [BusTools HowTo](https://github.com/BUStools/getting_started/blob/master/getting_started.ipynb). 
**Output directory: `results/reference_genome`** -* `kallisto_index` - * Contains the index of the supplied (genome/transcriptome) fasta file +- `kallisto_index` + - Contains the index of the supplied (genome/transcriptome) fasta file ## STARsolo **Output directory: `results/STAR`** -* Contains the mapped BAM files and output metrics created by STARsolo +- Contains the mapped BAM files and output metrics created by STARsolo **Output directory: `results/reference_genome`** -* `star_index` - * Contains the index of the supplied genome fasta file +- `star_index` + - Contains the index of the supplied genome fasta file ## Salmon Alevin & AlevinQC **Output directory: `results/alevin`** -* `alevin` - * Contains the created Salmon Alevin pseudo-aligned output -* `alevinqc` - * Contains the QC report for the aforementioned Salmon Alevin output data +- `alevin` + - Contains the created Salmon Alevin pseudo-aligned output +- `alevinqc` + - Contains the QC report for the aforementioned Salmon Alevin output data **Output directory: `results/reference_genome`** -* `salmon_index` - * Contains the indexed reference transcriptome for Salmon Alevin -* `alevin/txp2gene.tsv` - * The transcriptome to gene mapping TSV file utilized by Salmon Alevin +- `salmon_index` + - Contains the indexed reference transcriptome for Salmon Alevin +- `alevin/txp2gene.tsv` + - The transcriptome to gene mapping TSV file utilized by Salmon Alevin ## Other output data **Output directory: `results/reference_genome`** -* `barcodes` - * Contains the utilized cell barcode whitelists (if applicable) -* `extract_transcriptome` - * When supplied with a `--fasta` genome fasta, this contains the extracted transcriptome - * The GTF file supplied with `--gtf` is used to extract the transcriptome positions appropriately +- `barcodes` + - Contains the utilized cell barcode whitelists (if applicable) +- `extract_transcriptome` + - When supplied with a `--fasta` genome fasta, this contains the extracted transcriptome + - The GTF file supplied with `--gtf` is used to extract the transcriptome positions appropriately ## MultiQC
Output files -* `multiqc/` - * `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. - * `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. - * `multiqc_plots/`: directory containing static images from the report in various formats. +- `multiqc/` + - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. + - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. + - `multiqc_plots/`: directory containing static images from the report in various formats.
@@ -105,10 +104,10 @@ Results generated by MultiQC collate pipeline QC from supported tools e.g. FastQ
Output files -* `pipeline_info/` - * Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`. - * Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameter's are used when running the pipeline. - * Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`. +- `pipeline_info/` + - Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`. + - Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameter's are used when running the pipeline. + - Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`.
diff --git a/docs/usage.md b/docs/usage.md index 460c804b..c30f64ee 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -32,11 +32,11 @@ sample,fastq_1,fastq_2 test,https://github.com/nf-core/test-datasets/raw/scrnaseq/testdata/S10_L001_R1_001.fastq.gz,https://github.com/nf-core/test-datasets/raw/scrnaseq/testdata/S10_L001_R2_001.fastq.gz ``` -| Column | Description | -|----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `sample` | Custom sample name. This entry will be identical for multiple sequencing libraries/runs from the same sample. Spaces in sample names are automatically converted to underscores (`_`). | -| `fastq_1` | Full path to FastQ file for Illumina short reads 1. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". | -| `fastq_2` | Full path to FastQ file for Illumina short reads 2. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". | +| Column | Description | +| --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `sample` | Custom sample name. This entry will be identical for multiple sequencing libraries/runs from the same sample. Spaces in sample names are automatically converted to underscores (`_`). | +| `fastq_1` | Full path to FastQ file for Illumina short reads 1. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". | +| `fastq_2` | Full path to FastQ file for Illumina short reads 2. File has to be gzipped and have the extension ".fastq.gz" or ".fq.gz". | An [example samplesheet](../assets/samplesheet.csv) has been provided with the pipeline. @@ -96,21 +96,21 @@ They are loaded in sequence, so later profiles can overwrite earlier profiles. If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended. -* `docker` - * A generic configuration profile to be used with [Docker](https://docker.com/) -* `singularity` - * A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/) -* `podman` - * A generic configuration profile to be used with [Podman](https://podman.io/) -* `shifter` - * A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/) -* `charliecloud` - * A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/) -* `conda` - * A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud. 
-* `test` - * A profile with a complete configuration for automated testing - * Includes links to test data so needs no other parameters +- `docker` + - A generic configuration profile to be used with [Docker](https://docker.com/) +- `singularity` + - A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/) +- `podman` + - A generic configuration profile to be used with [Podman](https://podman.io/) +- `shifter` + - A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/) +- `charliecloud` + - A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/) +- `conda` + - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud. +- `test` + - A profile with a complete configuration for automated testing + - Includes links to test data so needs no other parameters ### `-resume` @@ -159,7 +159,7 @@ Work dir: Tip: you can replicate the issue by changing to the process work dir and entering the command `bash .command.run` ``` -To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN). We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so based on the search results the file we want is `modules/nf-core/software/star/align/main.nf`. If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9). The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure subset of processes having similar computing requirements. The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB. Providing you haven't set any other standard nf-core parameters to __cap__ the [maximum resources](https://nf-co.re/usage/configuration#max-resources) used by the pipeline then we can try and bypass the `STAR_ALIGN` process failure by creating a custom config file that sets at least 72GB of memory, in this case increased to 100GB. The custom config below can then be provided to the pipeline via the [`-c`](#-c) parameter as highlighted in previous sections. +To bypass this error you would need to find exactly which resources are set by the `STAR_ALIGN` process. The quickest way is to search for `process STAR_ALIGN` in the [nf-core/rnaseq Github repo](https://github.com/nf-core/rnaseq/search?q=process+STAR_ALIGN). We have standardised the structure of Nextflow DSL2 pipelines such that all module files will be present in the `modules/` directory and so based on the search results the file we want is `modules/nf-core/software/star/align/main.nf`. 
If you click on the link to that file you will notice that there is a `label` directive at the top of the module that is set to [`label process_high`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L9). The [Nextflow `label`](https://www.nextflow.io/docs/latest/process.html#label) directive allows us to organise workflow processes in separate groups which can be referenced in a configuration file to select and configure subset of processes having similar computing requirements. The default values for the `process_high` label are set in the pipeline's [`base.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/base.config#L33-L37) which in this case is defined as 72GB. Providing you haven't set any other standard nf-core parameters to **cap** the [maximum resources](https://nf-co.re/usage/configuration#max-resources) used by the pipeline then we can try and bypass the `STAR_ALIGN` process failure by creating a custom config file that sets at least 72GB of memory, in this case increased to 100GB. The custom config below can then be provided to the pipeline via the [`-c`](#-c) parameter as highlighted in previous sections. ```nextflow process { @@ -179,7 +179,7 @@ The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementatio 2. Find the latest version of the Biocontainer available on [Quay.io](https://quay.io/repository/biocontainers/pangolin?tag=latest&tab=tags) 3. Create the custom config accordingly: - * For Docker: + - For Docker: ```nextflow process { @@ -189,7 +189,7 @@ The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementatio } ``` - * For Singularity: + - For Singularity: ```nextflow process { @@ -199,7 +199,7 @@ The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementatio } ``` - * For Conda: + - For Conda: ```nextflow process { diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml b/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml index 5b5b8a60..60b546a0 100644 --- a/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml +++ b/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml @@ -8,7 +8,7 @@ tools: description: Custom module used to dump software versions within the nf-core pipeline template homepage: https://github.com/nf-core/tools documentation: https://github.com/nf-core/tools - licence: ['MIT'] + licence: ["MIT"] input: - versions: type: file diff --git a/modules/nf-core/modules/fastqc/meta.yml b/modules/nf-core/modules/fastqc/meta.yml index b09553a3..4da5bb5a 100644 --- a/modules/nf-core/modules/fastqc/meta.yml +++ b/modules/nf-core/modules/fastqc/meta.yml @@ -1,52 +1,52 @@ name: fastqc description: Run FastQC on sequenced reads keywords: - - quality control - - qc - - adapters - - fastq + - quality control + - qc + - adapters + - fastq tools: - - fastqc: - description: | - FastQC gives general quality metrics about your reads. - It provides information about the quality score distribution - across your reads, the per base sequence content (%A/C/G/T). - You get information about adapter contamination and other - overrepresented sequences. - homepage: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/ - documentation: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/ - licence: ['GPL-2.0-only'] + - fastqc: + description: | + FastQC gives general quality metrics about your reads. 
+ It provides information about the quality score distribution + across your reads, the per base sequence content (%A/C/G/T). + You get information about adapter contamination and other + overrepresented sequences. + homepage: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/ + documentation: https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/ + licence: ["GPL-2.0-only"] input: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. [ id:'test', single_end:false ] - - reads: - type: file - description: | - List of input FastQ files of size 1 and 2 for single-end and paired-end data, - respectively. + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - reads: + type: file + description: | + List of input FastQ files of size 1 and 2 for single-end and paired-end data, + respectively. output: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. [ id:'test', single_end:false ] - - html: - type: file - description: FastQC report - pattern: "*_{fastqc.html}" - - zip: - type: file - description: FastQC report archive - pattern: "*_{fastqc.zip}" - - versions: - type: file - description: File containing software versions - pattern: "versions.yml" + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. [ id:'test', single_end:false ] + - html: + type: file + description: FastQC report + pattern: "*_{fastqc.html}" + - zip: + type: file + description: FastQC report archive + pattern: "*_{fastqc.zip}" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" authors: - - "@drpatelh" - - "@grst" - - "@ewels" - - "@FelixKrueger" + - "@drpatelh" + - "@grst" + - "@ewels" + - "@FelixKrueger" diff --git a/modules/nf-core/modules/gffread/meta.yml b/modules/nf-core/modules/gffread/meta.yml index bf1a15cb..20335747 100644 --- a/modules/nf-core/modules/gffread/meta.yml +++ b/modules/nf-core/modules/gffread/meta.yml @@ -11,13 +11,13 @@ tools: documentation: http://ccb.jhu.edu/software/stringtie/gff.shtml#gffread tool_dev_url: https://github.com/gpertea/gffread doi: 10.12688/f1000research.23297.1 - licence: ['MIT'] + licence: ["MIT"] input: - gff: - type: file - description: A reference file in either the GFF3, GFF2 or GTF format. - pattern: "*.{gff, gtf}" + type: file + description: A reference file in either the GFF3, GFF2 or GTF format. + pattern: "*.{gff, gtf}" output: - gtf: diff --git a/modules/nf-core/modules/gunzip/meta.yml b/modules/nf-core/modules/gunzip/meta.yml index ea1f1546..2e0e4054 100644 --- a/modules/nf-core/modules/gunzip/meta.yml +++ b/modules/nf-core/modules/gunzip/meta.yml @@ -1,34 +1,34 @@ name: gunzip description: Compresses and decompresses files. keywords: - - gunzip - - compression + - gunzip + - compression tools: - - gunzip: - description: | - gzip is a file format and a software application used for file compression and decompression. - documentation: https://www.gnu.org/software/gzip/manual/gzip.html - licence: ['GPL-3.0-or-later'] + - gunzip: + description: | + gzip is a file format and a software application used for file compression and decompression. + documentation: https://www.gnu.org/software/gzip/manual/gzip.html + licence: ["GPL-3.0-or-later"] input: - - meta: - type: map - description: | - Optional groovy Map containing meta information - e.g. 
[ id:'test', single_end:false ] - - archive: - type: file - description: File to be compressed/uncompressed - pattern: "*.*" + - meta: + type: map + description: | + Optional groovy Map containing meta information + e.g. [ id:'test', single_end:false ] + - archive: + type: file + description: File to be compressed/uncompressed + pattern: "*.*" output: - - gunzip: - type: file - description: Compressed/uncompressed file - pattern: "*.*" - - versions: - type: file - description: File containing software versions - pattern: "versions.yml" + - gunzip: + type: file + description: Compressed/uncompressed file + pattern: "*.*" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" authors: - - "@joseespinosa" - - "@drpatelh" - - "@jfy133" + - "@joseespinosa" + - "@drpatelh" + - "@jfy133" diff --git a/modules/nf-core/modules/multiqc/meta.yml b/modules/nf-core/modules/multiqc/meta.yml index 63c75a45..6fa891ef 100644 --- a/modules/nf-core/modules/multiqc/meta.yml +++ b/modules/nf-core/modules/multiqc/meta.yml @@ -1,40 +1,40 @@ name: MultiQC description: Aggregate results from bioinformatics analyses across many samples into a single report keywords: - - QC - - bioinformatics tools - - Beautiful stand-alone HTML report + - QC + - bioinformatics tools + - Beautiful stand-alone HTML report tools: - - multiqc: - description: | - MultiQC searches a given directory for analysis logs and compiles a HTML report. - It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. - homepage: https://multiqc.info/ - documentation: https://multiqc.info/docs/ - licence: ['GPL-3.0-or-later'] + - multiqc: + description: | + MultiQC searches a given directory for analysis logs and compiles a HTML report. + It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. 
+ homepage: https://multiqc.info/ + documentation: https://multiqc.info/docs/ + licence: ["GPL-3.0-or-later"] input: - - multiqc_files: - type: file - description: | - List of reports / files recognised by MultiQC, for example the html and zip output of FastQC + - multiqc_files: + type: file + description: | + List of reports / files recognised by MultiQC, for example the html and zip output of FastQC output: - - report: - type: file - description: MultiQC report file - pattern: "multiqc_report.html" - - data: - type: dir - description: MultiQC data dir - pattern: "multiqc_data" - - plots: - type: file - description: Plots created by MultiQC - pattern: "*_data" - - versions: - type: file - description: File containing software versions - pattern: "versions.yml" + - report: + type: file + description: MultiQC report file + pattern: "multiqc_report.html" + - data: + type: dir + description: MultiQC data dir + pattern: "multiqc_data" + - plots: + type: file + description: Plots created by MultiQC + pattern: "*_data" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" authors: - - "@abhi18av" - - "@bunop" - - "@drpatelh" + - "@abhi18av" + - "@bunop" + - "@drpatelh" diff --git a/modules/nf-core/modules/salmon/index/meta.yml b/modules/nf-core/modules/salmon/index/meta.yml index 3b0cd853..53c64152 100644 --- a/modules/nf-core/modules/salmon/index/meta.yml +++ b/modules/nf-core/modules/salmon/index/meta.yml @@ -12,7 +12,7 @@ tools: homepage: https://salmon.readthedocs.io/en/latest/salmon.html manual: https://salmon.readthedocs.io/en/latest/salmon.html doi: 10.1038/nmeth.4197 - licence: ['GPL-3.0-or-later'] + licence: ["GPL-3.0-or-later"] input: - genome_fasta: type: file diff --git a/modules/nf-core/modules/salmon/quant/meta.yml b/modules/nf-core/modules/salmon/quant/meta.yml index 223ca82b..109109d8 100644 --- a/modules/nf-core/modules/salmon/quant/meta.yml +++ b/modules/nf-core/modules/salmon/quant/meta.yml @@ -12,7 +12,7 @@ tools: homepage: https://salmon.readthedocs.io/en/latest/salmon.html manual: https://salmon.readthedocs.io/en/latest/salmon.html doi: 10.1038/nmeth.4197 - licence: ['GPL-3.0-or-later'] + licence: ["GPL-3.0-or-later"] input: - meta: type: map diff --git a/modules/nf-core/modules/star/genomegenerate/meta.yml b/modules/nf-core/modules/star/genomegenerate/meta.yml index 04ade195..8181157a 100644 --- a/modules/nf-core/modules/star/genomegenerate/meta.yml +++ b/modules/nf-core/modules/star/genomegenerate/meta.yml @@ -13,7 +13,7 @@ tools: homepage: https://github.com/alexdobin/STAR manual: https://github.com/alexdobin/STAR/blob/master/doc/STARmanual.pdf doi: 10.1093/bioinformatics/bts635 - licence: ['MIT'] + licence: ["MIT"] input: - fasta: type: file diff --git a/nextflow_schema.json b/nextflow_schema.json index 86921942..9470a401 100644 --- a/nextflow_schema.json +++ b/nextflow_schema.json @@ -54,23 +54,13 @@ "default": "alevin", "help_text": "The workflow can handle three types of methods:\n\n- Kallisto/Bustools\n- Salmon Alevin + AlevinQC\n- STARsolo\n\nTo choose which one to use, please specify either `alevin`, `star` or `kallisto` as a parameter option for `--aligner`. By default, the pipeline runs the `alevin` option. 
Note that specifying another aligner option also requires choosing appropriate parameters (see below) for the selected option.", "fa_icon": "fas fa-align-center", - "enum": [ - "kallisto", - "star", - "alevin", - "cellranger" - ] + "enum": ["kallisto", "star", "alevin", "cellranger"] }, "protocol": { "type": "string", "default": "10XV2", "fa_icon": "fas fa-cogs", - "enum": [ - "10XV3", - "10XV2", - "10XV1", - "dropseq" - ] + "enum": ["10XV3", "10XV2", "10XV1", "dropseq"] } }, "fa_icon": "fas fa-terminal" @@ -412,4 +402,4 @@ "type": "string" } } -} \ No newline at end of file +}
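As a hedged sketch of how the `aligner` and `protocol` enums defined in `nextflow_schema.json` above can be set, assuming a user-supplied params/config file (the values shown are examples; `alevin` and `10XV2` remain the defaults):

```nextflow
// custom.config: illustrative parameter overrides for nf-core/scrnaseq
params {
    aligner  = 'star'   // allowed: 'alevin' (default), 'star', 'kallisto', 'cellranger'
    protocol = '10XV3'  // allowed: '10XV1', '10XV2' (default), '10XV3', 'dropseq'
}
```

This file would be passed to the pipeline with `-c custom.config`; the same settings can also be given directly on the command line as `--aligner star --protocol 10XV3`.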