diff --git a/.github/markdownlint.yml b/.github/markdownlint.yml index 96b12a70..cc8e0d33 100644 --- a/.github/markdownlint.yml +++ b/.github/markdownlint.yml @@ -3,3 +3,7 @@ default: true, line-length: false no-duplicate-header: siblings_only: true +MD033: + allowed_elements: [details, summary, p, img] +MD007: + indent: 4 diff --git a/.github/workflows/awsfulltest.yml b/.github/workflows/awsfulltest.yml new file mode 100644 index 00000000..1cc3a9d5 --- /dev/null +++ b/.github/workflows/awsfulltest.yml @@ -0,0 +1,36 @@ +name: nf-core AWS full test +# This workflow is triggered on releases. +# It runs the -profile 'test_full' on AWS batch + +on: + release: + types: [published] + +jobs: + run-awstest: + if: github.repository == 'nf-core/viralrecon' + name: Run AWS test + runs-on: ubuntu-latest + steps: + - name: Setup Miniconda + uses: goanpeca/setup-miniconda@v1.0.2 + with: + auto-update-conda: true + python-version: 3.7 + - name: Install awscli + run: conda install -c conda-forge awscli + - name: Start AWS batch job + env: + AWS_ACCESS_KEY_ID: ${{secrets.AWS_ACCESS_KEY_ID}} + AWS_SECRET_ACCESS_KEY: ${{secrets.AWS_SECRET_ACCESS_KEY}} + TOWER_ACCESS_TOKEN: ${{secrets.AWS_TOWER_TOKEN}} + #AWS_JOB_DEFINITION: ${{secrets.AWS_JOB_DEFINITION}} + AWS_JOB_QUEUE: ${{secrets.AWS_JOB_QUEUE}} + AWS_S3_BUCKET: ${{secrets.AWS_S3_BUCKET}} + run: | # Submits job to AWS batch using a 'nextflow-4GiB' job definition. Setting JVM options to "-XX:+UseG1GC" for more efficient garbage collection when staging remote files. + aws batch submit-job \ + --region eu-west-1 \ + --job-name nf-core-viralrecon \ + --job-queue $AWS_JOB_QUEUE \ + --job-definition nextflow-4GiB \ + --container-overrides '{"command": ["nf-core/viralrecon", "-r '"${GITHUB_SHA}"' -profile test_full --outdir s3://'"${AWS_S3_BUCKET}"'/viralrecon/results-'"${GITHUB_SHA}"' -w s3://'"${AWS_S3_BUCKET}"'/viralrecon/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}, {"name": "NXF_OPTS", "value": "-XX:+UseG1GC"}]}' \ No newline at end of file diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml index 5ccf4182..f9533cb2 100644 --- a/.github/workflows/awstest.yml +++ b/.github/workflows/awstest.yml @@ -1,16 +1,15 @@ name: nf-core AWS test -# This workflow is triggered on PRs to the master branch. -# It runs the -profile 'test_full' on AWS batch +# This workflow is triggered on push to the master branch. +# It runs the -profile 'test' on AWS batch on: push: branches: - master - release: - types: [published] - + - dev # only for testing purposes, to be removed jobs: run-awstest: + if: github.repository == 'nf-core/viralrecon' name: Run AWS test runs-on: ubuntu-latest steps: @@ -23,8 +22,16 @@ jobs: run: conda install -c conda-forge awscli - name: Start AWS batch job env: - AWS_ACCESS_KEY_ID: ${{secrets.AWS_KEY_ID}} - AWS_SECRET_ACCESS_KEY: ${{secrets.AWS_KEY_SECRET}} - TOWER_ACCESS_TOKEN: ${{secrets.TOWER_ACCESS_TOKEN}} - run: | # Submits job to AWS batch using a 'nextflow-big' instance. Setting JVM options to "-XX:+UseG1GC" for more efficient garbage collection when staging remote files. 
- aws batch submit-job --region eu-west-1 --job-name nf-core-viralrecon --job-queue 'default-8b3836e0-5eda-11ea-96e5-0a2c3f6a2a32' --job-definition nextflow-4GiB --container-overrides '{"command": ["nf-core/viralrecon", "-r '"${GITHUB_SHA}"' -profile test_full --outdir s3://nf-core-awsmegatests/viralrecon/results-'"${GITHUB_SHA}"' -w s3://nf-core-awsmegatests/viralrecon/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}, {"name": "NXF_OPTS", "value": "-XX:+UseG1GC"}]}' + AWS_ACCESS_KEY_ID: ${{secrets.AWS_ACCESS_KEY_ID}} + AWS_SECRET_ACCESS_KEY: ${{secrets.AWS_SECRET_ACCESS_KEY}} + TOWER_ACCESS_TOKEN: ${{secrets.AWS_TOWER_TOKEN}} + #AWS_JOB_DEFINITION: ${{secrets.AWS_JOB_DEFINITION}} + AWS_JOB_QUEUE: ${{secrets.AWS_JOB_QUEUE}} + AWS_S3_BUCKET: ${{secrets.AWS_S3_BUCKET}} + run: | # Submits job to AWS batch using a 'nextflow-4GiB' job definition. Setting JVM options to "-XX:+UseG1GC" for more efficient garbage collection when staging remote files. + aws batch submit-job \ + --region eu-west-1 \ + --job-name nf-core-viralrecon \ + --job-queue $AWS_JOB_QUEUE \ + --job-definition nextflow-4GiB \ + --container-overrides '{"command": ["nf-core/viralrecon", "-r '"${GITHUB_SHA}"' -profile test --outdir s3://'"${AWS_S3_BUCKET}"'/viralrecon/results-'"${GITHUB_SHA}"' -w s3://'"${AWS_S3_BUCKET}"'/viralrecon/work-'"${GITHUB_SHA}"' -with-tower"], "environment": [{"name": "TOWER_ACCESS_TOKEN", "value": "'"$TOWER_ACCESS_TOKEN"'"}, {"name": "NXF_OPTS", "value": "-XX:+UseG1GC"}]}' diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml index edd515eb..29f6994f 100644 --- a/.github/workflows/branch.yml +++ b/.github/workflows/branch.yml @@ -1,16 +1,16 @@ -name: nf-core branch protection -# This workflow is triggered on PRs to master branch on the repository -# It fails when someone tries to make a PR against the nf-core `master` branch instead of `dev` -on: - pull_request: - branches: - - master - -jobs: - test: - runs-on: ubuntu-18.04 - steps: - # PRs are only ok if coming from an nf-core `dev` branch or a fork `patch` branch - - name: Check PRs - run: | - { [[ $(git remote get-url origin) == *nf-core/viralrecon ]] && [[ ${GITHUB_HEAD_REF} = "dev" ]]; } || [[ ${GITHUB_HEAD_REF} == "patch" ]] +name: nf-core branch protection +# This workflow is triggered on PRs to master branch on the repository +# It fails when someone tries to make a PR against the nf-core `master` branch instead of `dev` +on: + pull_request: + branches: + - master + +jobs: + test: + runs-on: ubuntu-18.04 + steps: + # PRs are only ok if coming from an nf-core `dev` branch or a fork `patch` branch + - name: Check PRs + run: | + { [[ $(git remote get-url origin) == *nf-core/viralrecon ]] && [[ ${GITHUB_HEAD_REF} = "dev" ]]; } || [[ ${GITHUB_HEAD_REF} == "patch" ]] diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2f031c0e..737a8f85 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,94 +1,205 @@ name: nf-core CI -# This workflow is triggered on pushes and PRs to the repository. +# This workflow is triggered on releases and pull-requests. 
# It runs the pipeline with the minimal test dataset to check that it completes without any syntax errors -on: [push, pull_request] +on: + push: + branches: + - dev + pull_request: + release: + types: [published] jobs: test: + name: Test default workflow + # Only run on push if this is the nf-core dev branch (merged PRs) + if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/viralrecon') }} + runs-on: ubuntu-latest env: NXF_VER: ${{ matrix.nxf_ver }} NXF_ANSI_LOG: false - runs-on: ubuntu-latest strategy: matrix: # Nextflow versions: check pipeline minimum and current latest nxf_ver: ['19.10.0', ''] steps: - - uses: actions/checkout@v2 + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Check if Dockerfile or Conda environment changed + uses: technote-space/get-diff-action@v1 + with: + PREFIX_FILTER: | + Dockerfile + environment.yml + + - name: Build new docker image + if: env.GIT_DIFF + run: docker build --no-cache . -t nfcore/viralrecon:1.1.0 + + - name: Pull docker image + if: ${{ !env.GIT_DIFF }} + run: | + docker pull nfcore/viralrecon:dev + docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.1.0 + - name: Install Nextflow run: | wget -qO- get.nextflow.io | bash sudo mv nextflow /usr/local/bin/ - - name: Pull docker image - run: | - docker pull nfcore/viralrecon:dev - docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.0.0 + - name: Run pipeline with test data run: | nextflow run ${GITHUB_WORKSPACE} -profile test,docker parameters: + name: Test workflow parameters + if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/viralrecon') }} + runs-on: ubuntu-latest env: NXF_VER: '19.10.0' NXF_ANSI_LOG: false - runs-on: ubuntu-latest strategy: matrix: - parameters: [--skip_adapter_trimming, --skip_markduplicates, --skip_variants, --skip_amplicon_trimming, --skip_kraken2, --skip_assembly] + parameters: [--skip_adapter_trimming, --skip_markduplicates, --skip_variants, --skip_amplicon_trimming, --skip_kraken2, --skip_assembly, '--callers ivar --assemblers spades'] steps: - - uses: actions/checkout@v2 + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Check if Dockerfile or Conda environment changed + uses: technote-space/get-diff-action@v1 + with: + PREFIX_FILTER: | + Dockerfile + environment.yml + + - name: Build new docker image + if: env.GIT_DIFF + run: docker build --no-cache . 
-t nfcore/viralrecon:1.1.0 + + - name: Pull docker image + if: ${{ !env.GIT_DIFF }} + run: | + docker pull nfcore/viralrecon:dev + docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.1.0 + - name: Install Nextflow run: | wget -qO- get.nextflow.io | bash sudo mv nextflow /usr/local/bin/ - - name: Pull docker image - run: | - docker pull nfcore/viralrecon:dev - docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.0.0 + - name: Run pipeline with test amplicon data with various options run: | nextflow run ${GITHUB_WORKSPACE} -profile test,docker ${{ matrix.parameters }} test_sra: - env: - NXF_VER: '19.10.0' - NXF_ANSI_LOG: false - runs-on: ubuntu-latest - strategy: - matrix: - parameters: [--skip_sra, ''] - steps: - - uses: actions/checkout@v2 - - name: Install Nextflow - run: | - wget -qO- get.nextflow.io | bash - sudo mv nextflow /usr/local/bin/ - - name: Pull docker image - run: | - docker pull nfcore/viralrecon:dev - docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.0.0 - - name: Run pipeline with minimal data via SRA ids and various options - run: | - nextflow run ${GITHUB_WORKSPACE} -profile test_sra,docker ${{ matrix.parameters }} + name: Test SRA workflow + if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/viralrecon') }} + runs-on: ubuntu-latest + env: + NXF_VER: '19.10.0' + NXF_ANSI_LOG: false + strategy: + matrix: + parameters: [--skip_sra, ''] + steps: + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Check if Dockerfile or Conda environment changed + uses: technote-space/get-diff-action@v1 + with: + PREFIX_FILTER: | + Dockerfile + environment.yml + + - name: Build new docker image + if: env.GIT_DIFF + run: docker build --no-cache . -t nfcore/viralrecon:1.1.0 + + - name: Pull docker image + if: ${{ !env.GIT_DIFF }} + run: | + docker pull nfcore/viralrecon:dev + docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.1.0 + + - name: Install Nextflow + run: | + wget -qO- get.nextflow.io | bash + sudo mv nextflow /usr/local/bin/ + + - name: Run pipeline with minimal data via SRA ids and various options + run: | + nextflow run ${GITHUB_WORKSPACE} -profile test_sra,docker ${{ matrix.parameters }} test_sispa: - env: - NXF_VER: '19.10.0' - NXF_ANSI_LOG: false - runs-on: ubuntu-latest - strategy: - matrix: - parameters: [--gff false, ''] - steps: - - uses: actions/checkout@v2 - - name: Install Nextflow - run: | - wget -qO- get.nextflow.io | bash - sudo mv nextflow /usr/local/bin/ - - name: Pull docker image - run: | - docker pull nfcore/viralrecon:dev - docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.0.0 - - name: Run pipeline with minimal SISPA data and various options - run: | - nextflow run ${GITHUB_WORKSPACE} -profile test_sispa,docker ${{ matrix.parameters }} + name: Test SISPA workflow + if: ${{ github.event_name != 'push' || (github.event_name == 'push' && github.repository == 'nf-core/viralrecon') }} + runs-on: ubuntu-latest + env: + NXF_VER: '19.10.0' + NXF_ANSI_LOG: false + strategy: + matrix: + parameters: [--gff false, ''] + steps: + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Check if Dockerfile or Conda environment changed + uses: technote-space/get-diff-action@v1 + with: + PREFIX_FILTER: | + Dockerfile + environment.yml + + - name: Build new docker image + if: env.GIT_DIFF + run: docker build --no-cache . 
-t nfcore/viralrecon:1.1.0 + + - name: Pull docker image + if: ${{ !env.GIT_DIFF }} + run: | + docker pull nfcore/viralrecon:dev + docker tag nfcore/viralrecon:dev nfcore/viralrecon:1.1.0 + + - name: Install Nextflow + run: | + wget -qO- get.nextflow.io | bash + sudo mv nextflow /usr/local/bin/ + + - name: Run pipeline with minimal SISPA data and various options + run: | + nextflow run ${GITHUB_WORKSPACE} -profile test_sispa,docker ${{ matrix.parameters }} + + push_dockerhub: + name: Push new Docker image to Docker Hub + runs-on: ubuntu-latest + # Only run if the tests passed + needs: test + # Only run for the nf-core repo, for releases and merged PRs + if: ${{ github.repository == 'nf-core/viralrecon' && (github.event_name == 'release' || github.event_name == 'push') }} + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASS: ${{ secrets.DOCKERHUB_PASS }} + steps: + - name: Check out pipeline code + uses: actions/checkout@v2 + + - name: Build new docker image + run: docker build --no-cache . -t nfcore/viralrecon:latest + + - name: Push Docker image to DockerHub (dev) + if: ${{ github.event_name == 'push' }} + run: | + echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin + docker tag nfcore/viralrecon:latest nfcore/viralrecon:dev + docker push nfcore/viralrecon:dev + + - name: Push Docker image to DockerHub (release) + if: ${{ github.event_name == 'release' }} + run: | + echo "$DOCKERHUB_PASS" | docker login -u "$DOCKERHUB_USERNAME" --password-stdin + docker push nfcore/viralrecon:latest + docker tag nfcore/viralrecon:latest nfcore/viralrecon:${{ github.ref }} + docker push nfcore/viralrecon:${{ github.ref }} diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index 1e0827a8..eb66c144 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -33,18 +33,29 @@ jobs: nf-core: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + + - name: Check out pipeline code + uses: actions/checkout@v2 + - name: Install Nextflow run: | wget -qO- get.nextflow.io | bash sudo mv nextflow /usr/local/bin/ + - uses: actions/setup-python@v1 with: python-version: '3.6' architecture: 'x64' + - name: Install dependencies run: | python -m pip install --upgrade pip pip install nf-core + - name: Run nf-core lint + env: + GITHUB_COMMENTS_URL: ${{ github.event.pull_request.comments_url }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_PR_COMMIT: ${{ github.event.pull_request.head.sha }} run: nf-core lint ${GITHUB_WORKSPACE} + diff --git a/CHANGELOG.md b/CHANGELOG.md index 734d327b..f0420342 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,49 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+## [1.1.0] - 2020-06-23
+
+### `Added`
+
+* [#112](https://github.com/nf-core/viralrecon/issues/112) - Per-amplicon coverage plot
+* [#124](https://github.com/nf-core/viralrecon/issues/124) - Intersect variants across callers
+* [nf-core/tools#616](https://github.com/nf-core/tools/pull/616) - Updated GitHub Actions to build Docker image and push to Docker Hub
+* Parameters:
+    * `--min_mapped_reads` - circumvent failures for samples with a low number of mapped reads
+    * `--varscan2_strand_filter` - toggle the default VarScan 2 strand filter
+    * `--skip_mosdepth` - skip genome-wide and amplicon coverage plot generation from mosdepth output
+    * `--amplicon_left_suffix` - left primer suffix used in the name field of `--amplicon_bed`
+    * `--amplicon_right_suffix` - right primer suffix used in the name field of `--amplicon_bed`
+    * Unify parameter specification with COG-UK pipeline:
+        * `--min_allele_freq` - minimum allele frequency threshold for calling variants
+        * `--mpileup_depth` - SAMtools mpileup max per-file depth
+        * `--ivar_exclude_reads` renamed to `--ivar_trim_noprimer`
+        * `--ivar_trim_min_len` - minimum length of read to retain after primer trimming
+        * `--ivar_trim_min_qual` - minimum quality threshold for sliding window to pass
+        * `--ivar_trim_window_width` - width of sliding window
+* [#118](https://github.com/nf-core/viralrecon/issues/118) - Updated GitHub Actions AWS workflow for small and full-size tests
+
+### `Removed`
+
+* `--skip_qc` parameter
+
+### `Dependencies`
+
+* Add mosdepth `0.2.6`
+* Add bioconductor-complexheatmap `2.2.0`
+* Add bioconductor-biostrings `2.54.0`
+* Add r-optparse `1.6.6`
+* Add r-tidyr `1.1.0`
+* Add r-tidyverse `1.3.0`
+* Add r-ggplot2 `3.3.1`
+* Add r-reshape2 `1.4.4`
+* Add r-viridis `0.5.1`
+* Update sra-tools `2.10.3` -> `2.10.7`
+* Update bowtie2 `2.3.5.1` -> `2.4.1`
+* Update picard `2.22.8` -> `2.23.0`
+* Update minia `3.2.3` -> `3.2.4`
+* Update plasmidid `1.5.2` -> `1.6.3`
+
 ## [1.0.0] - 2020-06-01

 Initial release of nf-core/viralrecon, created with the [nf-core](http://nf-co.re/) template.
diff --git a/CITATIONS.md b/CITATIONS.md
index ef6aa270..485968c6 100644
--- a/CITATIONS.md
+++ b/CITATIONS.md
@@ -48,6 +48,9 @@
 * [Minimap2](https://www.ncbi.nlm.nih.gov/pubmed/29750242/)
    > Li H. Minimap2: pairwise alignment for nucleotide sequences. Bioinformatics. 2018 Sep 15;34(18):3094-3100. doi: 10.1093/bioinformatics/bty191. PubMed PMID: 29750242; PubMed Central PMCID: PMC6137996.

+* [mosdepth](https://www.ncbi.nlm.nih.gov/pubmed/29096012)
+    > Pedersen BS, Quinlan AR. Mosdepth: Quick Coverage Calculation for Genomes and Exomes. Bioinformatics. 2018 Mar 1;34(5):867-868. doi: 10.1093/bioinformatics/btx699. PMID: 29096012 PMCID: PMC6030888.
+
 * [MultiQC](https://www.ncbi.nlm.nih.gov/pubmed/27312411/)
    > Ewels P, Magnusson M, Lundin S, Käller M. MultiQC: summarize analysis results for multiple tools and samples in a single report. Bioinformatics. 2016 Oct 1;32(19):3047-8. doi: 10.1093/bioinformatics/btw354. Epub 2016 Jun 16. PubMed PMID: 27312411; PubMed Central PMCID: PMC5039924.
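The `--amplicon_left_suffix`/`--amplicon_right_suffix` parameters listed in the changelog above drive the primer-collapsing step this PR adds further down (`bin/collapse_amplicon_bed.py`). As a minimal sketch of that idea, assuming made-up primer names and coordinates (the suffix-stripping regex is the one the script itself uses):

```python
#!/usr/bin/env python
# Sketch of collapsing LEFT/RIGHT primers into one interval per amplicon.
# Primer names and coordinates below are invented for illustration; the
# regex mirrors the one used by bin/collapse_amplicon_bed.py in this PR.
import re

LEFT_SUFFIX, RIGHT_SUFFIX = '_LEFT', '_RIGHT'  # the pipeline's defaults

primers = [
    # (chrom, start, end, name)
    ('MN908947.3',  30,  54, 'nCoV-2019_1_LEFT'),
    ('MN908947.3', 385, 410, 'nCoV-2019_1_RIGHT'),
    ('MN908947.3', 320, 342, 'nCoV-2019_2_LEFT'),
    ('MN908947.3', 704, 726, 'nCoV-2019_2_RIGHT'),
]

amplicons = {}
for chrom, start, end, name in primers:
    # Strip the primer suffix (and anything after it) to get the amplicon name
    amplicon = re.sub(r'(?:{}|{}).*'.format(LEFT_SUFFIX, RIGHT_SUFFIX), '', name)
    amplicons.setdefault(amplicon, []).append((start, end))

for amplicon, intervals in amplicons.items():
    positions = [pos for interval in intervals for pos in interval]
    print('{}\t{}\t{}'.format(amplicon, min(positions), max(positions)))
    # nCoV-2019_1   30   410
    # nCoV-2019_2   320  726
```

Each amplicon's LEFT and RIGHT primer intervals reduce to a single interval spanning the leftmost to the rightmost primer coordinate, which is the interval the new per-amplicon mosdepth coverage plots are computed over.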
diff --git a/Dockerfile b/Dockerfile index e2ff3e00..4d994949 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,10 +10,10 @@ RUN conda env create -f /environment.yml && conda clean -a RUN apt-get install -y libgl1-mesa-glx && apt-get clean -y # Add conda installation dir to PATH (instead of doing 'conda activate') -ENV PATH /opt/conda/envs/nf-core-viralrecon-1.0.0/bin:$PATH +ENV PATH /opt/conda/envs/nf-core-viralrecon-1.1.0/bin:$PATH # Dump the details of the installed packages to a file for posterity -RUN conda env export --name nf-core-viralrecon-1.0.0 > nf-core-viralrecon-1.0.0.yml +RUN conda env export --name nf-core-viralrecon-1.1.0 > nf-core-viralrecon-1.1.0.yml # Instruct R processes to use these empty files instead of clashing with a local version RUN touch .Rprofile diff --git a/README.md b/README.md index 4c5d43b5..1a1e8166 100644 --- a/README.md +++ b/README.md @@ -3,22 +3,17 @@ [![GitHub Actions CI Status](https://github.com/nf-core/viralrecon/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/viralrecon/actions) [![GitHub Actions Linting Status](https://github.com/nf-core/viralrecon/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/viralrecon/actions) [![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3901628.svg)](https://doi.org/10.5281/zenodo.3901628) [![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](http://bioconda.github.io/) [![Docker](https://img.shields.io/docker/automated/nfcore/viralrecon.svg)](https://hub.docker.com/r/nfcore/viralrecon) -[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3872730.svg)](https://doi.org/10.5281/zenodo.3872730) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23viralrecon-4A154B?logo=slack)](https://nfcore.slack.com/channels/viralrecon) ## Introduction -**nfcore/viralrecon** is a bioinformatics analysis pipeline used to perform assembly and intrahost/low-frequency variant calling for viral samples. The pipeline currently supports metagenomics and amplicon sequencing data derived from the Illumina sequencing platform. +**nfcore/viralrecon** is a bioinformatics analysis pipeline used to perform assembly and intra-host/low-frequency variant calling for viral samples. The pipeline supports short-read Illumina sequencing data from both shotgun (e.g. sequencing directly from clinical samples) and enrichment-based library preparation methods (e.g. amplicon-based: [ARTIC SARS-CoV-2 enrichment protocol](https://artic.network/ncov-2019); or probe-capture-based). -This pipeline is a re-implementation of the [SARS_Cov2_consensus-nf](https://github.com/BU-ISCIII/SARS_Cov2_consensus-nf) and [SARS_Cov2_assembly-nf](https://github.com/BU-ISCIII/SARS_Cov2_assembly-nf) pipelines initially developed by [Sarai Varona](https://github.com/svarona) and [Sara Monzon](https://github.com/saramonzon) from [BU-ISCIII](https://github.com/BU-ISCIII). Porting both of these pipelines to nf-core was an international collaboration between numerous contributors and developers, led by [Harshil Patel](https://github.com/drpatelh) from the [The Bioinformatics & Biostatistics Group](https://www.crick.ac.uk/research/science-technology-platforms/bioinformatics-and-biostatistics/) at [The Francis Crick Institute](https://www.crick.ac.uk/), London. 
We appreciated the need to have a portable, reproducible and scalable pipeline for the analysis of COVID-19 sequencing samples and so the Avengers Assembled! Please come and join us and add yourself to the contributor list :) - -We have integrated a number of options in the pipeline to allow you to run specific aspects of the workflow if you so wish. For example, you can skip all of the assembly steps with the `--skip_assembly` parameter. See [usage docs](docs/usage.md) for all of the available options when running the pipeline. - -Please click [here](https://raw.githack.com/nf-core/viralrecon/master/docs/html/multiqc_report.html) to see an example MultiQC report generated using the parameters defined in [this configuration file](https://github.com/nf-core/viralrecon/blob/master/conf/test_full.config) to run the pipeline on [samples](https://zenodo.org/record/3735111) which were prepared from the [ncov-2019 ARTIC Network V1 amplicon set](https://artic.network/ncov-2019) and sequenced on the Illumina MiSeq platform in 301bp paired-end format. - -The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. Furthermore, automated continuous integration tests to run the pipeline on a full-sized dataset are passing on AWS cloud. +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with Docker containers making installation trivial and results highly reproducible. Furthermore, automated continuous integration tests that run the pipeline on a full-sized dataset using AWS cloud ensure that the code is stable. ## Pipeline summary @@ -32,9 +27,11 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool 3. Primer sequence removal ([`iVar`](https://github.com/andersen-lab/ivar); *amplicon data only*) 4. Duplicate read marking ([`picard`](https://broadinstitute.github.io/picard/); *removal optional*) 5. Alignment-level QC ([`picard`](https://broadinstitute.github.io/picard/), [`SAMtools`](https://sourceforge.net/projects/samtools/files/samtools/)) - 6. Choice of multiple variant calling and consensus sequence generation routes ([`VarScan 2`](http://dkoboldt.github.io/varscan/), [`BCFTools`](http://samtools.github.io/bcftools/bcftools.html), [`BEDTools`](https://github.com/arq5x/bedtools2/) *||* [`iVar variants and consensus`](https://github.com/andersen-lab/ivar) *||* [`BCFTools`](http://samtools.github.io/bcftools/bcftools.html), [`BEDTools`](https://github.com/arq5x/bedtools2/)) + 6. Genome-wide and amplicon coverage QC plots ([`mosdepth`](https://github.com/brentp/mosdepth/)) + 7. Choice of multiple variant calling and consensus sequence generation routes ([`VarScan 2`](http://dkoboldt.github.io/varscan/), [`BCFTools`](http://samtools.github.io/bcftools/bcftools.html), [`BEDTools`](https://github.com/arq5x/bedtools2/) *||* [`iVar variants and consensus`](https://github.com/andersen-lab/ivar) *||* [`BCFTools`](http://samtools.github.io/bcftools/bcftools.html), [`BEDTools`](https://github.com/arq5x/bedtools2/)) * Variant annotation ([`SnpEff`](http://snpeff.sourceforge.net/SnpEff.html), [`SnpSift`](http://snpeff.sourceforge.net/SnpSift.html)) * Consensus assessment report ([`QUAST`](http://quast.sourceforge.net/quast)) + 8. 
Intersect variants across callers ([`BCFTools`](http://samtools.github.io/bcftools/bcftools.html))
6. _De novo_ assembly
    1. Primer trimming ([`Cutadapt`](https://cutadapt.readthedocs.io/en/stable/guide.html); *amplicon data only*)
    2. Removal of host reads ([`Kraken 2`](http://ccb.jhu.edu/software/kraken2/))
@@ -47,27 +44,52 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool
        * Variant annotation ([`SnpEff`](http://snpeff.sourceforge.net/SnpEff.html), [`SnpSift`](http://snpeff.sourceforge.net/SnpSift.html))
7. Present QC and visualisation for raw read, alignment, assembly and variant calling results ([`MultiQC`](http://multiqc.info/))

+> **NB:** The pipeline has a number of options to allow you to run only specific aspects of the workflow if you so wish.
+For example, you can skip all of the assembly steps with the `--skip_assembly` parameter.
+See the [usage docs](docs/usage.md) for all of the available options when running the pipeline.
+
+## Pipeline reporting
+
+Numerous QC and reporting steps are included in the pipeline in order to collate a full summary of the analysis within a single [MultiQC](https://multiqc.info/) report. You can see [an example MultiQC report here](https://raw.githack.com/nf-core/viralrecon/master/docs/html/multiqc_report.html), generated using the parameters defined in [this configuration file](https://github.com/nf-core/viralrecon/blob/master/conf/test_full.config). The pipeline was run with [these samples](https://zenodo.org/record/3735111), prepared from the [ncov-2019 ARTIC Network V1 amplicon set](https://artic.network/ncov-2019) and sequenced on the Illumina MiSeq platform in 301bp paired-end format.
+
## Quick Start

-i. Install [`nextflow`](https://nf-co.re/usage/installation)
+1. Install [`nextflow`](https://nf-co.re/usage/installation)
+
+2. Install either [`Docker`](https://docs.docker.com/engine/installation/) or [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_
+
+3. Download the pipeline and test it on a minimal dataset with a single command:
+
+    ```bash
+    nextflow run nf-core/viralrecon -profile test,<docker/singularity/conda/institute>
+    ```
-ii. Install either [`Docker`](https://docs.docker.com/engine/installation/) or [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) for full pipeline reproducibility (please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))
+
+    > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile <institute>` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.
-iii. Download the pipeline and test it on a minimal dataset with a single command
-```bash
-nextflow run nf-core/viralrecon -profile test,<docker/singularity/conda>
-```
-> Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile <institute>` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.
-iv. Start running your own analysis!
+
+4. Start running your own analysis!
+
+    * Typical command for shotgun analysis:
+
+        ```bash
+        nextflow run nf-core/viralrecon \
+            --input samplesheet.csv \
+            --genome 'MN908947.3' \
+            -profile <docker/singularity/conda/institute>
+        ```
-```bash
-nextflow run nf-core/viralrecon -profile <docker/singularity/conda> --input samplesheet.csv --genome 'NC_045512.2' -profile docker
-```
+
+    * Typical command for amplicon analysis:
+
+        ```bash
+        nextflow run nf-core/viralrecon \
+            --input samplesheet.csv \
+            --genome 'MN908947.3' \
+            --protocol amplicon \
+            --amplicon_bed ./nCoV-2019.artic.V3.bed \
+            --skip_assembly \
+            -profile <docker/singularity/conda/institute>
+        ```
-See [usage docs](docs/usage.md) for all of the available options when running the pipeline.
+
+See the [usage documentation](docs/usage.md) for all of the available options when running the pipeline.

## Documentation

@@ -86,16 +108,21 @@ The nf-core/viralrecon pipeline comes with documentation about the pipeline, fou

These scripts were originally written by [Sarai Varona](https://github.com/svarona), [Miguel Juliá](https://github.com/MiguelJulia) and [Sara Monzon](https://github.com/saramonzon) from [BU-ISCIII](https://github.com/BU-ISCIII) and co-ordinated by Isabel Cuesta for the [Institute of Health Carlos III](https://eng.isciii.es/eng.isciii.es/Paginas/Inicio.html), Spain. Through collaboration with the nf-core community the pipeline has now been updated substantially to include additional processing steps, to standardise inputs/outputs and to improve pipeline reporting; implemented primarily by [Harshil Patel](https://github.com/drpatelh) from [The Bioinformatics & Biostatistics Group](https://www.crick.ac.uk/research/science-technology-platforms/bioinformatics-and-biostatistics/) at [The Francis Crick Institute](https://www.crick.ac.uk/), London.
-Many thanks to others who have helped out and contributed along the way too, including (but not limited to): +Many thanks to others who have helped out and contributed along the way too, including (but not limited to)\*: | Name | Affiliation | |-----------------------------------------------------------|---------------------------------------------------------------------------------------| +| [Aengus Stewart](https://github.com/stewarta) | [The Francis Crick Institute, UK](https://www.crick.ac.uk/) | | [Alexander Peltzer](https://github.com/apeltzer) | [Boehringer Ingelheim, Germany](https://www.boehringer-ingelheim.de/) | | [Alison Meynert](https://github.com/ameynert) | [University of Edinburgh, Scotland](https://www.ed.ac.uk/) | +| [Anton Korobeynikov](https://github.com/asl) | [Saint Petersburg State University, Russia](https://english.spbu.ru/) | +| [Artem Babaian](https://github.com/ababaian) | [University of British Columbia, Canada](https://www.ubc.ca/) | +| [Dmitry Meleshko](https://github.com/1dayac) | [Saint Petersburg State University, Russia](https://english.spbu.ru/) | | [Edgar Garriga Nogales](https://github.com/edgano) | [Centre for Genomic Regulation, Spain](https://www.crg.eu/) | | [Erik Garrison](https://github.com/ekg) | [UCSC, USA](https://www.ucsc.edu/) | | [Gisela Gabernet](https://github.com/ggabernet) | [QBiC, University of Tübingen, Germany](https://portal.qbic.uni-tuebingen.de/portal/) | | [Joao Curado](https://github.com/jcurado-flomics) | [Flomics Biotech, Spain](https://www.flomics.com/) | +| [Jerome Nicod](https://github.com/Jeromics) | [The Francis Crick Institute, UK](https://www.crick.ac.uk) | | [Jose Espinosa-Carrasco](https://github.com/JoseEspinosa) | [Centre for Genomic Regulation, Spain](https://www.crg.eu/) | | [Katrin Sameith](https://github.com/ktrns) | [DRESDEN-concept Genome Center, Germany](https://genomecenter.tu-dresden.de) | | [Lluc Cabus](https://github.com/lcabus-flomics) | [Flomics Biotech, Spain](https://www.flomics.com/) | @@ -103,21 +130,23 @@ Many thanks to others who have helped out and contributed along the way too, inc | [Maxime Garcia](https://github.com/MaxUlysse) | [SciLifeLab, Sweden](https://www.scilifelab.se/) | | [Michael Heuer](https://github.com/heuermh) | [UC Berkeley, USA](https://https://rise.cs.berkeley.edu) | | [Phil Ewels](https://github.com/ewels) | [SciLifeLab, Sweden](https://www.scilifelab.se/) | +| [Richard Mitter](https://github.com/rjmitter) | [The Francis Crick Institute, UK](https://www.crick.ac.uk/) | +| [Robert Goldstone](https://github.com/rjgoldstone) | [The Francis Crick Institute, UK](https://www.crick.ac.uk/) | | [Simon Heumos](https://github.com/subwaystation) | [QBiC, University of Tübingen, Germany](https://portal.qbic.uni-tuebingen.de/portal/) | | [Stephen Kelly](https://github.com/stevekm) | [Memorial Sloan Kettering Cancer Center, USA](https://www.mskcc.org/) | | [Thanh Le Viet](https://github.com/thanhleviet) | [Quadram Institute, UK](https://quadram.ac.uk/) | -> Listed in alphabetical order +> \* Listed in alphabetical order ## Contributions and Support If you would like to contribute to this pipeline, please see the [contributing guidelines](https://github.com/nf-core/viralrecon/blob/master/.github/CONTRIBUTING.md). -For further information or help, don't hesitate to get in touch on [Slack](https://nfcore.slack.com/channels/viralrecon) (you can join with [this invite](https://nf-co.re/join/slack)). 
+For further information or help, don't hesitate to get in touch on [Slack `#viralrecon` channel](https://nfcore.slack.com/channels/viralrecon) (you can join with [this invite](https://nf-co.re/join/slack)). ## Citation -If you use nf-core/viralrecon for your analysis, please cite it using the following doi: [10.5281/zenodo.3872730](https://doi.org/10.5281/zenodo.3872730) +If you use nf-core/viralrecon for your analysis, please cite it using the following doi: [10.5281/zenodo.3901628](https://doi.org/10.5281/zenodo.3901628) An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](https://github.com/nf-core/viralrecon/blob/master/CITATIONS.md) file. diff --git a/assets/email_template.html b/assets/email_template.html index 7dcbc3b8..ca2c3685 100644 --- a/assets/email_template.html +++ b/assets/email_template.html @@ -16,7 +16,7 @@

<h1>nf-core/viralrecon v${version}</h1>

<h2>Run Name: $runName</h2>

-<% if (!success){
+<% if (!success) {
    out << """
        <h4 style="margin-top:0; color: inherit;">nf-core/viralrecon execution completed unsuccessfully!</h4>

@@ -25,6 +25,17 @@
        <p>The full error message was:</p>
        <pre style="white-space: pre-wrap; overflow: visible; margin-bottom: 0;">${errorReport}</pre>

    """
+} else if (fail_mapped_reads.size() > 0) {
+    out << """
+        <div style="color: #856404; background-color: #fff3cd; border-color: #ffeeba; padding: 15px; margin-bottom: 20px; border: 1px solid transparent; border-radius: 4px;">
+            <h4 style="margin-top:0; color: inherit;">nf-core/viralrecon execution completed with warnings!</h4>
+            <p>The pipeline finished successfully, but the following samples were skipped
+               due to failing the minimum mapped read threshold (< ${min_mapped_reads}):</p>
+            <ul>
+                <li>${fail_mapped_reads.sort().join('</li><li>')}</li>
+            </ul>
+        </div>
+    """
} else {
    out << """
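The HTML template above and the plain-text template that follows branch the same way: outright failure first, then success-with-warnings when samples fell below the `--min_mapped_reads` threshold, then plain success. A rough Python mimic of that (Groovy) branching, with hypothetical sample names, to show which block gets rendered:

```python
#!/usr/bin/env python
# Illustrative mimic of the email templates' three-way branch; the sample
# names and threshold value are hypothetical.
def email_status(success, fail_mapped_reads, min_mapped_reads):
    if not success:
        return 'nf-core/viralrecon execution completed unsuccessfully!'
    elif len(fail_mapped_reads) > 0:
        # Sorted bullet list of skipped samples, as in the templates
        skipped = '\n - '.join(sorted(fail_mapped_reads))
        return ('nf-core/viralrecon execution completed with warnings!\n'
                'Samples skipped (< {} mapped reads):\n'
                ' - {}'.format(min_mapped_reads, skipped))
    return 'nf-core/viralrecon execution completed successfully!'


print(email_status(True, ['SAMPLE_3', 'SAMPLE_1'], 1000))
# nf-core/viralrecon execution completed with warnings!
# Samples skipped (< 1000 mapped reads):
#  - SAMPLE_1
#  - SAMPLE_3
```

Note that the warning branch sorts the skipped samples before joining them, exactly as `fail_mapped_reads.sort().join(...)` does in both templates.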
diff --git a/assets/email_template.txt b/assets/email_template.txt index f7235df3..9b78bb18 100644 --- a/assets/email_template.txt +++ b/assets/email_template.txt @@ -9,19 +9,31 @@ Run Name: $runName -<% if (success){ - out << "## nf-core/viralrecon execution completed successfully! ##" -} else { +<% if (!success){ out << """#################################################### -## nf-core/viralrecon execution completed unsuccessfully! ## -#################################################### +## nf-core/viralrecon completed unsuccessfully! ## +####################################################\n The exit status of the task that caused the workflow execution to fail was: $exitStatus. The full error message was: ${errorReport} """ -} %> +} else if (fail_mapped_reads.size() > 0) { + out << """#################################################### +## nf-core/viralrecon completed with warnings! ## +####################################################\n +The pipeline finished successfully, but the following samples were skipped +due to failing the minimum mapped read threshold (less than ${min_mapped_reads}): + - ${fail_mapped_reads.sort().join("\n - ")} +""" +} else { + out << """#################################################### +## nf-core/viralrecon completed successfully! ## +####################################################\n +""" +} +%> The workflow was completed at $dateComplete (duration: $duration) @@ -30,11 +42,11 @@ The command used to launch the workflow was as follows: $commandLine - Pipeline Configuration: ----------------------- <% out << summary.collect{ k,v -> " - $k: $v" }.join("\n") %> + -- nf-core/viralrecon https://github.com/nf-core/viralrecon diff --git a/assets/multiqc_config.yaml b/assets/multiqc_config.yaml index 8613adff..681ce675 100644 --- a/assets/multiqc_config.yaml +++ b/assets/multiqc_config.yaml @@ -12,6 +12,7 @@ run_modules: - bowtie2 - samtools - picard + - mosdepth - varscan2 - ivar - bcftools @@ -23,29 +24,46 @@ run_modules: exclude_modules: - 'general_stats' +# See https://github.com/ewels/MultiQC_TestData/blob/master/data/custom_content/with_config/table_headerconfig/multiqc_config.yaml +custom_data: + summary_variants_metrics: + section_name: 'Variant calling metrics' + description: 'generated by the nf-core/viralrecon pipeline' + plot_type: 'table' + pconfig: + id: 'summary_variants_metrics_plot' + table_title: 'Variant calling metrics' + namespace: 'Variant calling metrics' + format: '{:.0f}' + summary_assembly_metrics: + section_name: 'De novo assembly metrics' + description: 'generated by the nf-core/viralrecon pipeline' + plot_type: 'table' + pconfig: + id: 'summary_assembly_metrics_plot' + table_title: 'De novo assembly metrics' + namespace: 'De novo assembly metrics' + format: '{:.0f}' + module_order: - fastqc: name: 'PREPROCESS: FastQC (raw reads)' anchor: 'fastqc_raw' info: 'This section of the report shows FastQC results for the raw reads before adapter trimming.' path_filters: - - './fastqc/*' + - './fastqc/*.zip' - fastp: name: 'PREPROCESS: fastp (adapter trimming)' info: 'This section of the report shows fastp results for reads after adapter and quality trimming.' - path_filters: - - './fastp/log/*' - fastqc: name: 'PREPROCESS: FastQC (adapter trimming)' anchor: 'fastqc_fastp' info: 'This section of the report shows FastQC results for reads after adapter and quality trimming.' 
path_filters: - - './fastp/fastqc/*' + - './fastp/fastqc/*.zip' - bowtie2: name: 'VARIANTS: Bowtie 2' info: 'This section of the report shows Bowtie 2 mapping results for reads after adapter trimming and quality trimming.' - path_filters: - - './bowtie2/log/*' - samtools: name: 'VARIANTS: SAMTools (raw)' anchor: 'samtools_bowtie2' @@ -55,8 +73,6 @@ module_order: - ivar: name: 'VARIANTS: iVar trim' info: 'This section of the report shows counts observed for each amplicon primer per sample as detected by iVar trim.' - path_filters: - - './ivar/trim/log/*' - samtools: name: 'VARIANTS: SAMTools (iVar)' anchor: 'samtools_ivar' @@ -72,155 +88,150 @@ module_order: - picard: name: 'VARIANTS: Picard Metrics' info: 'This section of the report shows picard CollectMultipleMetrics and MarkDuplicates results after mapping (if "--protocol amplicon" this will be after primer sequence removal with iVar).' - path_filters: - - './picard/metrics/*' + - mosdepth: + name: 'VARIANTS: mosdepth' + info: 'This section of the report shows genome-wide coverage metrics generated by mosdepth.' - varscan2: name: 'VARIANTS: VarScan 2' info: 'This section of the report shows total number of variants called by VarScan 2 broken down by those that were reported or not.' - path_filters: - - './varscan2/counts/lowfreq/*' - bcftools: name: 'VARIANTS: BCFTools (VarScan 2; high freq)' anchor: 'bcftools_varscan2' info: 'This section of the report shows BCFTools stats results for high frequency variants called by VarScan 2. The allele frequency filtering threshold can be set by the --max_allele_freq parameter (Default: 0.8).' path_filters: - - './varscan2/bcftools/highfreq/*' + - './varscan2/bcftools/highfreq/*.txt' - snpeff: name: 'VARIANTS: SnpEff (VarScan 2; high freq)' anchor: 'snpeff_varscan2' info: 'This section of the report shows SnpEff results for high frequency variants called by VarScan 2. The allele frequency filtering threshold can be set by the --max_allele_freq parameter (Default: 0.8).' path_filters: - - './varscan2/snpeff/highfreq/*' + - './varscan2/snpeff/highfreq/*.csv' - quast: name: 'VARIANTS: QUAST (VarScan 2; high freq)' anchor: 'quast_varscan2' info: 'This section of the report shows QUAST results for consensus sequences generated from high frequency variants with VarScan 2. The allele frequency filtering threshold can be set by the --max_allele_freq parameter (Default: 0.8).' path_filters: - - './varscan2/quast/highfreq/*' + - './varscan2/quast/highfreq/*.tsv' - bcftools: name: 'VARIANTS: BCFTools (iVar; high freq)' anchor: 'bcftools_ivar' info: 'This section of the report shows BCFTools stats results for high frequency variants called by iVar. The allele frequency filtering threshold can be set by the --max_allele_freq parameter (Default: 0.8).' path_filters: - - './ivar/variants/bcftools/highfreq/*' + - './ivar/variants/bcftools/highfreq/*.txt' - snpeff: name: 'VARIANTS: SnpEff (iVar; high freq)' anchor: 'snpeff_ivar' info: 'This section of the report shows SnpEff results for high frequency variants called by iVar. The allele frequency filtering threshold can be set by the --max_allele_freq parameter (Default: 0.8).' path_filters: - - './ivar/variants/snpeff/highfreq/*' + - './ivar/variants/snpeff/highfreq/*.csv' - quast: name: 'VARIANTS: QUAST (iVar; high freq)' anchor: 'quast_ivar' info: 'This section of the report shows QUAST results for consensus sequences generated from high frequency variants with iVar. 
The allele frequency filtering threshold can be set by the --max_allele_freq parameter (Default: 0.8).' path_filters: - - './ivar/consensus/quast/highfreq/*' + - './ivar/consensus/quast/highfreq/*.tsv' - bcftools: name: 'VARIANTS: BCFTools (BCFTools)' anchor: 'bcftools_bcftools' info: 'This section of the report shows BCFTools stats results for variants called by BCFTools.' path_filters: - - './bcftools/variants/bcftools/*' + - './bcftools/variants/bcftools/*.txt' - snpeff: name: 'VARIANTS: SnpEff (BCFTools)' anchor: 'snpeff_bcftools' info: 'This section of the report shows SnpEff results for variants called by BCFTools.' path_filters: - - './bcftools/variants/snpeff/*' + - './bcftools/variants/snpeff/*.csv' - quast: name: 'VARIANTS: QUAST (BCFTools)' anchor: 'quast_bcftools' info: 'This section of the report shows QUAST results for consensus sequence generated from BCFTools variants.' path_filters: - - './bcftools/consensus/quast/*' + - './bcftools/consensus/quast/*.tsv' - cutadapt: name: 'ASSEMBLY: Cutadapt (primer trimming)' info: 'This section of the report shows Cutadapt results for reads after primer sequence trimming.' - path_filters: - - './cutadapt/log/*' - fastqc: name: 'ASSEMBLY: FastQC (primer trimming)' anchor: 'fastqc_cutadapt' info: 'This section of the report shows FastQC results for reads after primer sequence trimming with Cutadapt.' path_filters: - - './cutadapt/fastqc/*' + - './cutadapt/fastqc/*.zip' - kraken: name: 'ASSEMBLY: Kraken 2' info: 'This section of the report shows Kraken 2 classification results for reads after primer sequence trimming with Cutadapt.' - path_filters: - - './kraken2/*' - quast: name: 'ASSEMBLY: QUAST (SPAdes)' anchor: 'quast_spades' info: 'This section of the report shows QUAST results from SPAdes de novo assembly.' path_filters: - - './spades/quast/*' + - './spades/quast/*.tsv' - bcftools: name: 'ASSEMBLY: BCFTools (SPAdes)' anchor: 'bcftools_spades' info: 'This section of the report shows BCFTools stats results for variants called in the SPAdes assembly relative to the reference.' path_filters: - - './spades/bcftools/*' + - './spades/bcftools/*.txt' - snpeff: name: 'ASSEMBLY: SnpEff (SPAdes)' anchor: 'snpeff_spades' info: 'This section of the report shows SnpEff results for variants called in the SPAdes assembly relative to the reference.' path_filters: - - './spades/snpeff/*' + - './spades/snpeff/*.csv' - quast: name: 'ASSEMBLY: QUAST (MetaSPAdes)' anchor: 'quast_metaspades' info: 'This section of the report shows QUAST results from MetaSPAdes de novo assembly.' path_filters: - - './metaspades/quast/*' + - './metaspades/quast/*.tsv' - bcftools: name: 'ASSEMBLY: BCFTools (MetaSPAdes)' anchor: 'bcftools_metaspades' info: 'This section of the report shows BCFTools stats results for variants called in the MetaSPAdes assembly relative to the reference.' path_filters: - - './metaspades/bcftools/*' + - './metaspades/bcftools/*.txt' - snpeff: name: 'ASSEMBLY: SnpEff (MetaSPAdes)' anchor: 'snpeff_metaspades' info: 'This section of the report shows SnpEff results for variants called in the MetaSPAdes assembly relative to the reference.' path_filters: - - './metaspades/snpeff/*' + - './metaspades/snpeff/*.csv' - quast: name: 'ASSEMBLY: QUAST (Unicycler)' anchor: 'quast_unicycler' info: 'This section of the report shows QUAST results from Unicycler de novo assembly.' 
path_filters: - - './unicycler/quast/*' + - './unicycler/quast/*.tsv' - bcftools: name: 'ASSEMBLY: BCFTools (Unicycler)' anchor: 'bcftools_unicycler' info: 'This section of the report shows BCFTools stats results for variants called in the Unicycler assembly relative to the reference.' path_filters: - - './unicycler/bcftools/*' + - './unicycler/bcftools/*.txt' - snpeff: name: 'ASSEMBLY: SnpEff (Unicycler)' anchor: 'snpeff_unicycler' info: 'This section of the report shows SnpEff results for variants called in the Unicycler assembly relative to the reference.' path_filters: - - './unicycler/snpeff/*' + - './unicycler/snpeff/*.csv' - quast: name: 'ASSEMBLY: QUAST (minia)' anchor: 'quast_minia' info: 'This section of the report shows QUAST results from minia de novo assembly.' path_filters: - - './minia/quast/*' + - './minia/quast/*.tsv' - bcftools: name: 'ASSEMBLY: BCFTools (minia)' anchor: 'bcftools_minia' info: 'This section of the report shows BCFTools stats results for variants called in the minia assembly relative to the reference.' path_filters: - - './minia/bcftools/*' + - './minia/bcftools/*.txt' - snpeff: name: 'ASSEMBLY: SnpEff (minia)' anchor: 'snpeff_minia' info: 'This section of the report shows SnpEff results for variants called in the minia assembly relative to the reference.' path_filters: - - './minia/snpeff/*' + - './minia/snpeff/*.csv' report_section_order: summary_assembly_metrics: @@ -232,45 +243,75 @@ report_section_order: nf-core-viralrecon-summary: order: -1002 +bcftools: + collapse_complementary_changes: true + custom_plot_config: picard_insert_size: cpswitch_c_active: False smooth_points: 1000 -bcftools: - collapse_complementary_changes: true - -# See https://github.com/ewels/MultiQC_TestData/blob/master/data/custom_content/with_config/table_headerconfig/multiqc_config.yaml -custom_data: - summary_variants_metrics: - section_name: 'Variant calling metrics' - description: 'generated by the nf-core/viralrecon pipeline' - plot_type: 'table' - pconfig: - id: 'summary_variants_metrics_plot' - table_title: 'Variant calling metrics' - namespace: 'Variant calling metrics' - format: '{:.0f}' - summary_assembly_metrics: - section_name: 'De novo assembly metrics' - description: 'generated by the nf-core/viralrecon pipeline' - plot_type: 'table' - pconfig: - id: 'summary_assembly_metrics_plot' - table_title: 'De novo assembly metrics' - namespace: 'De novo assembly metrics' - format: '{:.0f}' - extra_fn_clean_exts: - - '.trim' - - '.bowtie2' - - '.mkD' - - '.ptrim' - - '.highfreq' - - '.lowfreq' - - '.consensus' - - '.snpEff' - - '.scaffolds' - - '.kraken2' - - type: regex - pattern: '.(AF|k)[0-9]+.*' + - '.trim' + - '.bowtie2' + - '.mkD' + - '.ptrim' + - '.highfreq' + - '.lowfreq' + - '.consensus' + - '.snpEff' + - '.scaffolds' + - '.kraken2' + - type: regex + pattern: '.(AF|k)[0-9]+.*' + +# # Customise the module search patterns to speed up execution time +# # - Skip module sub-tools that we are not interested in +# # - Replace file-content searching with filename pattern searching +# # - Don't add anything that is the same as the MultiQC default +# # See https://multiqc.info/docs/#optimise-file-search-patterns for details +sp: + fastp: + fn: '*.fastp.json' + bowtie2: + fn: '*.bowtie2.log' + ivar/trim: + fn: '*.ivar.log' + mosdepth/global_dist: + fn: '*.global.dist.txt' + varscan2/mpileup2cns: + fn: '*.varscan2.log' + cutadapt: + fn: '*.cutadapt.log' + picard/alignment_metrics: + fn: '*.alignment_summary_metrics' + picard/insertsize: + fn: '*.insert_size_metrics' + 
picard/markdups: + fn: '*.MarkDuplicates.metrics.txt' + picard/wgs_metrics: + fn: '*.coverage_metrics' + picard/basedistributionbycycle: + skip: true + picard/gcbias: + skip: true + picard/hsmetrics: + skip: true + picard/oxogmetrics: + skip: true + picard/pcr_metrics: + skip: true + picard/quality_by_cycle: + skip: true + picard/quality_score_distribution: + skip: true + picard/quality_yield_metrics: + skip: true + picard/rnaseqmetrics: + skip: true + picard/rrbs_metrics: + skip: true + picard/sam_file_validation: + skip: true + picard/variant_calling_metrics: + skip: true diff --git a/bin/collapse_amplicon_bed.py b/bin/collapse_amplicon_bed.py new file mode 100755 index 00000000..9c2346e8 --- /dev/null +++ b/bin/collapse_amplicon_bed.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +import os +import sys +import re +import errno +import argparse + + +def parse_args(args=None): + Description = 'Collapse LEFT/RIGHT primers in amplicon BED to single intervals.' + Epilog = """Example usage: python collapse_amplicon_bed.py """ + + parser = argparse.ArgumentParser(description=Description, epilog=Epilog) + parser.add_argument('FILE_IN', help="Input BED file.") + parser.add_argument('FILE_OUT', help="Output BED file.") + parser.add_argument('-lp', '--left_primer_suffix', type=str, dest="LEFT_PRIMER_SUFFIX", default='_LEFT', help="Suffix for left primer in name column of BED file (default: '_LEFT').") + parser.add_argument('-rp', '--right_primer_suffix', type=str, dest="RIGHT_PRIMER_SUFFIX", default='_RIGHT', help="Suffix for right primer in name column of BED file (default: '_RIGHT').") + return parser.parse_args(args) + + +def make_dir(path): + if not len(path) == 0: + try: + os.makedirs(path) + except OSError as exception: + if exception.errno != errno.EEXIST: + raise + + +## See https://stackoverflow.com/a/480227 +def uniqify(seq): + seen = set() + seen_add = seen.add + return [x for x in seq if not (x in seen or seen_add(x))] + + +def collapse_amplicon_bed(FileIn,FileOut,LeftPrimerSuffix,RightPrimerSuffix): + StartPosList = [] + IntervalDict = {} + fin = open(FileIn,'r') + while True: + line = fin.readline() + if line: + chrom,start,end,name,score,strand = line.strip().split('\t') + amplicon = re.sub(r'(?:{}|{}).*'.format(LeftPrimerSuffix,RightPrimerSuffix),'',name) + if amplicon not in IntervalDict: + IntervalDict[amplicon] = [] + IntervalDict[amplicon].append((chrom,int(start),int(end),score)) + StartPosList.append((int(start),amplicon)) + else: + fin.close() + break + + fout = open(FileOut,'w') + for amplicon in uniqify([x[1] for x in sorted(StartPosList)]): + posList = [item for elem in IntervalDict[amplicon] for item in elem[1:3]] + chrom = IntervalDict[amplicon][0][0] + start = min(posList) + end = max(posList) + strand = '+' + score = IntervalDict[amplicon][0][3] + fout.write(f'{chrom}\t{start}\t{end}\t{amplicon}\t{score}\t{strand}\n') + fout.close() + + +def main(args=None): + args = parse_args(args) + collapse_amplicon_bed(args.FILE_IN,args.FILE_OUT,args.LEFT_PRIMER_SUFFIX,args.RIGHT_PRIMER_SUFFIX) + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/bin/ivar_variants_to_vcf.py b/bin/ivar_variants_to_vcf.py index 4860266f..5abe87b9 100755 --- a/bin/ivar_variants_to_vcf.py +++ b/bin/ivar_variants_to_vcf.py @@ -13,7 +13,7 @@ def parse_args(args=None): parser.add_argument('FILE_IN', help="Input tsv file.") parser.add_argument('FILE_OUT', help="Full path to output vcf file.") parser.add_argument('-po', '--pass_only', dest="PASS_ONLY", help="Only output variants that PASS all 
filters.",action='store_true') - parser.add_argument('-ma', '--min_allele_freq', type=float, dest="MIN_ALLELE_FREQ", default=0, help="Only output variants where allele frequency greater than this number (default: 0).") + parser.add_argument('-af', '--allele_freq_thresh', type=float, dest="ALLELE_FREQ_THRESH", default=0, help="Only output variants where allele frequency greater than this number (default: 0).") return parser.parse_args(args) @@ -96,7 +96,7 @@ def ivar_variants_to_vcf(FileIn,FileOut,passOnly=False,minAF=0): def main(args=None): args = parse_args(args) - ivar_variants_to_vcf(args.FILE_IN,args.FILE_OUT,args.PASS_ONLY,args.MIN_ALLELE_FREQ) + ivar_variants_to_vcf(args.FILE_IN,args.FILE_OUT,args.PASS_ONLY,args.ALLELE_FREQ_THRESH) if __name__ == '__main__': diff --git a/bin/multiqc_to_custom_tsv.py b/bin/multiqc_to_custom_tsv.py index 8afae320..dcb067ed 100755 --- a/bin/multiqc_to_custom_tsv.py +++ b/bin/multiqc_to_custom_tsv.py @@ -44,9 +44,11 @@ def yaml_fields_to_dict(YAMLFile,AppendDict={},FieldMappingList=[],ValidSampleLi for k in yaml_dict.keys(): key = k if os.path.basename(YAMLFile).startswith('multiqc_picard_insertSize'): - key = k[:-3] + if k[-3:] == '_FR': + key = k[:-3] if os.path.basename(YAMLFile).startswith('multiqc_cutadapt'): - names = [x for x in ValidSampleList if key.startswith(x)] + names = [x for x in ValidSampleList if key[:-2] == x] + names += [x for x in ValidSampleList if key == x] if names != []: key = names[0] inclSample = True diff --git a/bin/plot_base_density.r b/bin/plot_base_density.r new file mode 100755 index 00000000..4406b8c5 --- /dev/null +++ b/bin/plot_base_density.r @@ -0,0 +1,164 @@ +#!/usr/bin/env Rscript + +################################################ +################################################ +## LOAD LIBRARIES ## +################################################ +################################################ + +library(optparse) +library(ggplot2) +library(scales) +library(reshape2) +library(Biostrings) + +################################################ +################################################ +## VALIDATE COMMAND-LINE PARAMETERS ## +################################################ +################################################ + +option_list <- list(make_option(c("-i", "--fasta_files"), type="character", default=NULL, help="Comma-separated list of fasta files", metavar="fasta_files"), + make_option(c("-s", "--prefixes"), type="character", default=NULL, help="Comma-separated list of prefixes associated with fasta files to add to plots. 
Must be unique and in same order as fasta file input.", metavar="prefixes"), + make_option(c("-o", "--output_dir"), type="character", default='./', help="Output directory", metavar="path")) + +opt_parser <- OptionParser(option_list=option_list) +opt <- parse_args(opt_parser) + +## Check input files +INPUT_FILES <- unique(unlist(strsplit(opt$fasta_files,","))) +if (length(INPUT_FILES) == 0) { + print_help(opt_parser) + stop("At least one input file must be supplied", call.=FALSE) +} +if (!all(file.exists(INPUT_FILES))) { + stop(paste("The following input files don't exist:",paste(INPUT_FILES[!file.exists(INPUT_FILES)], sep='', collapse=' '), sep=' '), call.=FALSE) +} + +## Check prefixes for input files +PREFIXES <- basename(INPUT_FILES) +if (!is.null(opt$prefixes)){ + PREFIXES <- unique(unlist(strsplit(opt$prefixes,","))) + if (length(INPUT_FILES) != length(PREFIXES)) { + print_help(opt_parser) + stop("Please provide a unique prefix for each fasta file.", call.=FALSE) + } +} + +## Check the output directory has a trailing slash, if not add one +OUTDIR <- opt$output_dir +if (tail(strsplit(OUTDIR,"")[[1]],1)!="/") { + OUTDIR <- paste(OUTDIR,"/",sep='') +} +## Create the directory if it doesn't already exist. +if (!file.exists(OUTDIR)) { + dir.create(OUTDIR, recursive=TRUE) +} + +################################################ +################################################ +## READ IN DATA ## +################################################ +################################################ + +dat <- NULL +for (input_file in INPUT_FILES) { + dat <- c(dat,readDNAStringSet(input_file)[1]) +} + +################################################ +################################################ +## PLOTS ## +################################################ +################################################ + +bases_std <- c("A","C","T","G") +base_cols <- c("A" = "#009E73", + "C" = "#0072B2", + "T" = "#D55E00", + "G" = "#000000", + "N" = "#E69F00", + "X" = "#999999") + +for (idx in 1:length(dat)) { + + ## Table of base counts + base_seq <- strsplit(toString(dat[[idx]]), "")[[1]] + base_tab <- data.frame(table(base_seq), stringsAsFactors=FALSE) + colnames(base_tab) <- c("base","freq") + rownames(base_tab) <- base_tab$base + for (base in 1:length(bases_std)) { + if (!any(base_tab$base %in% bases_std[base])) { + base_tab <- rbind(base_tab,c(bases_std[base],0)) + } + } + base_tab$perc <- 100 *base_tab$freq / sum(base_tab$freq) + base_tab <- base_tab[order(base_tab$base, decreasing=FALSE),] + base_tab <- rbind(base_tab[c(bases_std, "N"),], base_tab[!rownames(base_tab) %in% c(bases_std, "N"),]) + base_tab$base <- factor(base_tab$base, levels=rownames(base_tab)) + outfile <- paste(OUTDIR, PREFIXES[idx], ".base_counts.tsv", sep='') + write.table(base_tab, file=outfile, col.names=TRUE, row.names=FALSE, sep="\t", quote=FALSE) + + ## Barplot of base frequencies + barplot <- ggplot(base_tab, aes(x=base,y=perc)) + + geom_bar(stat="identity") + + theme_classic() + + scale_y_continuous(limits=c(0,100),breaks=c(0,25,50,75,100)) + + ylab("% Observed") + + xlab("Base") + + ggtitle(PREFIXES[idx]) + outfile <- paste(OUTDIR, PREFIXES[idx], ".base_counts.pdf", sep='') + ggsave(file=outfile, barplot, width=12, height=10, units="cm") + + ## Create a data frame of base coverage + bases <- unique(c(bases_std,"N",unique(base_seq))) + base_dat <- data.frame(sample=names(dat[[idx]])[1], position=1:length(base_seq), stringsAsFactors=FALSE) + for (base in 1:length(bases)) { + base_dat[,bases[base]] <- 
diff --git a/bin/plot_mosdepth_regions.r b/bin/plot_mosdepth_regions.r
new file mode 100755
index 00000000..195726f6
--- /dev/null
+++ b/bin/plot_mosdepth_regions.r
@@ -0,0 +1,171 @@
+#!/usr/bin/env Rscript
+
+################################################
+################################################
+## LOAD LIBRARIES                             ##
+################################################
+################################################
+
+library(optparse)
+library(ggplot2)
+library(scales)
+library(ComplexHeatmap)
+library(viridis)
+library(tidyverse)
+
+################################################
+################################################
+## VALIDATE COMMAND-LINE PARAMETERS           ##
+################################################
+################################################
+
+option_list <- list(make_option(c("-i", "--input_files"), type="character", default=NULL, help="Comma-separated list of mosdepth regions output files (typically ending in '*.regions.bed.gz')", metavar="input_files"),
+                    make_option(c("-s", "--input_suffix"), type="character", default='.regions.bed.gz', help="Portion of filename after sample name to trim for plot title e.g. '.regions.bed.gz' if 'SAMPLE1.regions.bed.gz'", metavar="input_suffix"),
+                    make_option(c("-o", "--output_dir"), type="character", default='./', help="Output directory", metavar="path"),
+                    make_option(c("-p", "--output_suffix"), type="character", default='regions', help="Output suffix", metavar="output_suffix"))
+
+opt_parser <- OptionParser(option_list=option_list)
+opt <- parse_args(opt_parser)
+
+## Check input files
+INPUT_FILES <- unique(unlist(strsplit(opt$input_files,",")))
+if (length(INPUT_FILES) == 0) {
+    print_help(opt_parser)
+    stop("At least one input file must be supplied", call.=FALSE)
+}
+if (!all(file.exists(INPUT_FILES))) {
+    stop(paste("The following input files don't exist:",paste(INPUT_FILES[!file.exists(INPUT_FILES)], sep='', collapse=' '), sep=' '), call.=FALSE)
+}
+
+## Check the output directory has a trailing slash, if not add one
+OUTDIR <- opt$output_dir
+if (tail(strsplit(OUTDIR,"")[[1]],1)!="/") {
+    OUTDIR <- paste(OUTDIR,"/",sep='')
+}
+## Create the directory if it doesn't already exist.
+if (!file.exists(OUTDIR)) {
+    dir.create(OUTDIR, recursive=TRUE)
+}
+
+OUTSUFFIX <- trimws(opt$output_suffix, "both", whitespace="\\.")
+
+################################################
+################################################
+## READ IN DATA                               ##
+################################################
+################################################
+
+dat <- NULL
+for (input_file in INPUT_FILES) {
+    sample <- gsub(opt$input_suffix,'',basename(input_file))
+    dat <- rbind(dat, cbind(read.delim(input_file, header=FALSE, sep='\t', stringsAsFactors=FALSE, check.names=FALSE)[,-6], sample, stringsAsFactors=FALSE))
+}
+
+## Reformat table
+if (ncol(dat) == 6) {
+    colnames(dat) <- c('chrom', 'start', 'end', 'region', 'coverage', 'sample')
+    dat$region <- factor(dat$region, levels=unique(dat$region[order(dat$start)]))
+} else {
+    colnames(dat) <- c('chrom', 'start', 'end', 'coverage', 'sample')
+}
+dat$sample <- factor(dat$sample, levels=sort(unique(dat$sample)))
+
+## Write merged coverage data for all samples to file
+outfile <- paste(OUTDIR,"all_samples.",OUTSUFFIX,".coverage.tsv", sep='')
+write.table(dat, file=outfile, col.names=TRUE, row.names=FALSE, sep='\t', quote=FALSE)
+
+################################################
+################################################
+## PER-SAMPLE COVERAGE PLOTS                  ##
+################################################
+################################################
+
+for (sample in unique(dat$sample)) {
+    sample_dat <- dat[dat$sample == sample,]
+    outfile <- paste(OUTDIR,sample,".",OUTSUFFIX,".coverage.tsv", sep='')
+    write.table(sample_dat, file=outfile, col.names=TRUE, row.names=FALSE, sep='\t', quote=FALSE)
+    sample_dat$coverage <- sample_dat$coverage + 1
+
+    if (ncol(sample_dat) == 6) {
+        plot <- ggplot(sample_dat,aes(x=region,y=coverage)) +
+            geom_bar(stat="identity", fill="#D55E00", width=0.6) +
+            theme_bw() +
+            theme(plot.title=element_text(size=10),
+                  axis.text.x=element_text(size=10),
+                  axis.text.y=element_text(size=8)) +
+            coord_flip() +
+            scale_x_discrete(expand=c(0, 0)) +
+            scale_y_continuous(trans=log10_trans(),
+                               breaks=10^c(0:10),
+                               labels=trans_format('log10', math_format(10^.x)),
+                               expand=c(0, 0)) +
+            expand_limits(y=1) +
+            ylab(bquote('log'[10]~'(Coverage+1)')) +
+            xlab('Amplicon') +
+            ggtitle(paste(sample,'median coverage per amplicon'))
+
+        outfile <- paste(OUTDIR,sample,".",OUTSUFFIX,".coverage.pdf", sep='')
+        ggsave(file=outfile, plot, height=3+(0.3*length(unique(sample_dat$region))), width=16, units="cm")
+    } else {
+        plot <- ggplot(sample_dat,aes(x=end,y=coverage)) +
+            geom_ribbon(aes(ymin=0, ymax=coverage), fill="#D55E00") +
+            theme_bw() +
+            scale_x_continuous(expand=c(0, 0)) +
+            scale_y_continuous(trans=log10_trans(),
+                               breaks=10^c(0:10),
+                               labels=trans_format('log10', math_format(10^.x)),
+                               expand=c(0, 0)) +
+            expand_limits(y=1) +
+            ylab(bquote('log'[10]~'(Coverage+1)')) +
+            xlab('Position (bp)') +
+            ggtitle(paste(sample,'coverage'))
+
+        outfile <- paste(OUTDIR,sample,".",OUTSUFFIX,".coverage.pdf", sep='')
+        ggsave(file=outfile, plot, height=6, width=12, units="in")
+    }
+}
+
+################################################
+################################################
+## REGION-BASED HEATMAP ACROSS ALL SAMPLES    ##
+################################################
+################################################
+
+if (ncol(dat) == 6 && length(INPUT_FILES) > 1) {
+    mat <- spread(dat[,c("sample", "region", "coverage")], sample, coverage, fill=NA, convert=FALSE)
+    rownames(mat) <- mat[,1]
+    mat <- t(as.matrix(log10(mat[,-1] + 1)))
+    heatmap <- Heatmap(mat,
+                       column_title = "Heatmap to show median amplicon coverage across samples",
+                       name = "log10(Coverage+1)",
+                       cluster_rows = TRUE,
+                       cluster_columns = FALSE,
+                       show_row_names = TRUE,
+                       show_column_names = TRUE,
+                       column_title_side = "top",
+                       column_names_side = "bottom",
+                       row_names_side = "right",
+                       rect_gp = gpar(col="white", lwd=1),
+                       show_heatmap_legend = TRUE,
+                       heatmap_legend_param = list(title_gp=gpar(fontsize=12, fontface="bold"), labels_gp=gpar(fontsize=10), direction="horizontal"),
+                       column_title_gp = gpar(fontsize=14, fontface="bold"),
+                       row_names_gp = gpar(fontsize=10, fontface="bold"),
+                       column_names_gp = gpar(fontsize=10, fontface="bold"),
+                       height = unit(5, "mm")*nrow(mat),
+                       width = unit(5, "mm")*ncol(mat),
+                       col = viridis(50))
+
+    ## Size of heatmap scaled based on matrix dimensions: https://jokergoo.github.io/ComplexHeatmap-reference/book/other-tricks.html#set-the-same-cell-size-for-different-heatmaps-with-different-dimensions
+    height <- 0.1969*nrow(mat) + (2*1.5)
+    width <- 0.1969*ncol(mat) + (2*1.5)
+    outfile <- paste(OUTDIR,"all_samples.",OUTSUFFIX,".heatmap.pdf", sep='')
+    pdf(file=outfile, height=height, width=width)
+    draw(heatmap, heatmap_legend_side="bottom")
+    dev.off()
+}
+
+################################################
+################################################
+################################################
+################################################
r"Unicycler\sv(\S+)"], 'minia': ['v_minia.txt', r"Minia\sversion\s(\S+)"], - 'Minimap2': ['v_minimap2.txt', r"(\S+)"], - 'vg': ['v_vg.txt', r"vg\sversion\sv(\S+)"], 'BLAST': ['v_blast.txt', r"blastn:\s(\S+)"], 'ABACAS': ['v_abacas.txt', r"ABACAS.(\S+)"], - 'QUAST': ['v_quast.txt', r"QUAST\sv(\S+)"], + 'plasmidID': ['v_plasmidid.txt', r"(\S+)"], 'Bandage': ['v_bandage.txt', r"Version:\s(\S+)"], + 'Minimap2': ['v_minimap2.txt', r"(\S+)"], + 'vg': ['v_vg.txt', r"vg\sversion\sv(\S+)"], 'R': ['v_R.txt', r"R\sversion\s(\S+)"], 'MultiQC': ['v_multiqc.txt', r"multiqc,\sversion\s(\S+)"] } @@ -41,23 +43,25 @@ results['Bowtie 2'] = 'N/A' results['Samtools'] = 'N/A' results['BEDTools'] = 'N/A' +results['Mosdepth'] = 'N/A' results['Picard'] = 'N/A' results['iVar'] = 'N/A' results['VarScan 2'] = 'N/A' +results['BCFTools'] = 'N/A' results['SnpEff'] = 'N/A' results['SnpSift'] = 'N/A' -results['BCFTools'] = 'N/A' +results['QUAST'] = 'N/A' results['Cutadapt'] = 'N/A' results['Kraken2'] = 'N/A' results['SPAdes'] = 'N/A' results['Unicycler'] = 'N/A' results['minia'] = 'N/A' -results['Minimap2'] = 'N/A' -results['vg'] = 'N/A' results['BLAST'] = 'N/A' results['ABACAS'] = 'N/A' -results['QUAST'] = 'N/A' +results['plasmidID'] = 'N/A' results['Bandage'] = 'N/A' +results['Minimap2'] = 'N/A' +results['vg'] = 'N/A' results['R'] = 'N/A' results['MultiQC'] = 'N/A' diff --git a/docs/html/multiqc_report.html b/docs/html/multiqc_report.html index 97033fec..e8de5c3d 100644 --- a/docs/html/multiqc_report.html +++ b/docs/html/multiqc_report.html @@ -23,7 +23,7 @@ MultiQC Report - + @@ -5456,31 +5456,41 @@
diff --git a/docs/html/multiqc_report.html b/docs/html/multiqc_report.html
index 97033fec..e8de5c3d 100644
--- a/docs/html/multiqc_report.html
+++ b/docs/html/multiqc_report.html
[Diff body omitted: this is the regenerated example MultiQC report, and the raw HTML does not reproduce meaningfully here. The recoverable changes are:
 - The report navigation drops the Picard sections "Base Distribution", "Mean Base Quality by Cycle" and "Base Quality Distribution", and adds a "VARIANTS: mosdepth" section with "Coverage distribution", "Coverage plot" and "Average coverage per contig".
 - The report is regenerated on 2020-06-22 (previously 2020-05-31) from a test_full run (work directory nfcore/viralrecon/test_full/work/fb/bfe7e69a95d108076e42773b526fd3).
 - The "Variant calling metrics" table is regenerated with slightly different values; the updated rows, transposed for readability:

   Metric                                  sample1    sample2
   # Input reads                           2755026    2139958
   # Trimmed reads (fastp)                 2384570    1913910
   % Mapped reads (viral)                  100        99
   # Trimmed reads (iVar)                  2372162    1891311
   # Duplicate reads                       2216894    1816848
   # Reads after MarkDuplicates            2372162    1891311
   Insert size mean                        523        478
   Insert size std dev                     216        179
   Coverage mean                           1095       498
   Coverage std dev                        479        311
   % Coverage > 10x                        1          1
   # High conf SNPs (VarScan 2)            6          7
   # High conf INDELs (VarScan 2)          0          0
   # High conf SNPs (iVar)                 6          7
   # High conf INDELs (iVar)               0          0
   # High conf SNPs (BCFTools)             6          6
   # High conf INDELs (BCFTools)           0          0
   # Missense variants (VarScan 2)         2          5
   # Missense variants (iVar)              2          5
   # Missense variants (BCFTools)          2          5
   # Ns per 100kb consensus (VarScan 2)    224        338
   # Ns per 100kb consensus (iVar)         164        288
   # Ns per 100kb consensus (BCFTools)     224        338]