4 changes: 2 additions & 2 deletions .github/actions/nf-test/action.yml
@@ -70,8 +70,8 @@ runs:
--tap=test.tap \
--shard ${{ inputs.shard }}/${{ inputs.total_shards }}

# Save the absolute path of the test.tap file to the output
echo "tap_file_path=$(realpath test.tap)" >> $GITHUB_OUTPUT
# Save the absolute path of the test.tap file to the output
echo "tap_file_path=$(realpath test.tap)" >> $GITHUB_OUTPUT

- name: Generate test summary
if: always()
3 changes: 0 additions & 3 deletions .github/workflows/awsfulltest.yml
@@ -24,9 +24,6 @@ jobs:

- name: Launch workflow via Seqera Platform
uses: seqeralabs/action-tower-launch@v2
# TODO nf-core: You can customise AWS full pipeline tests as required
# Add full size test data (but still relatively small datasets for few samples)
# on the `test_full.config` test runs with only one set of parameters
with:
workspace_id: ${{ vars.TOWER_WORKSPACE_ID }}
access_token: ${{ secrets.TOWER_ACCESS_TOKEN }}
76 changes: 76 additions & 0 deletions .github/workflows/nf-test-entry.yml
@@ -0,0 +1,76 @@
name: Run nf-test
on:
pull_request:
paths-ignore:
- "docs/**"
- "**/meta.yml"
- "**/*.md"
- "**/*.png"
- "**/*.svg"
release:
types: [published]
workflow_dispatch:

# Cancel if a newer run is started
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
# NOTE(SW): only run large tests on up-sized instances to avoid wasting compute (in this case, just the test profile)
# NOTE(SW): the test profile requires more than 32 GB of memory; of the available RunsOn instances, the 32cpu-linux-x64 runner is the best fit despite being heavily overprovisioned on vCPUs
# https://runs-on.com/runners/linux/
# Instance type vCPUs Memory (GiB) RunsOn name
# m7a.medium 1 4 1cpu-linux-x64
# m7i.large 2 8 2cpu-linux-x64
# m7i.xlarge 4 16 4cpu-linux-x64
# c7i.2xlarge 8 16 8cpu-linux-x64
# c7i.4xlarge 16 32 16cpu-linux-x64
# m7i-flex.8xlarge 32 128 32cpu-linux-x64
# c7i.12xlarge 48 96 48cpu-linux-x64
# c7a.16xlarge 64 128 64cpu-linux-x64
nf-test:
uses: ./.github/workflows/nf-test-reusable.yml
strategy:
matrix:
include:
- name: small
tags: cicd
runson_instance_type: 4cpu-linux-x64
runson_volume_size: 60gb
- name: large
tags: cicd.large
runson_instance_type: 32cpu-linux-x64
runson_volume_size: 200gb
with:
name: ${{ matrix.name }}
tags: ${{ matrix.tags }}
runson_instance_type: ${{ matrix.runson_instance_type }}
runson_volume_size: ${{ matrix.runson_volume_size }}

confirm-pass:
needs: [nf-test]
if: always()
runs-on: # use self-hosted runners
- runs-on=${{ github.run_id }}-confirm-pass
- runner=1cpu-linux-x64
steps:
- name: One or more tests failed (excluding latest-everything)
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1

- name: One or more tests cancelled
if: ${{ contains(needs.*.result, 'cancelled') }}
run: exit 1

- name: All tests ok
if: ${{ contains(needs.*.result, 'success') }}
run: exit 0

- name: debug-print
if: always()
run: |
echo "::group::DEBUG: `needs` Contents"
echo "DEBUG: toJSON(needs) = ${{ toJSON(needs) }}"
echo "DEBUG: toJSON(needs.*.result) = ${{ toJSON(needs.*.result) }}"
echo "::endgroup::"
.github/workflows/nf-test-reusable.yml
@@ -1,24 +1,26 @@
name: Run nf-test
name: Run nf-test on selected test set
on:
pull_request:
paths-ignore:
- "docs/**"
- "**/meta.yml"
- "**/*.md"
- "**/*.png"
- "**/*.svg"
release:
types: [published]
workflow_dispatch:

# Cancel if a newer run is started
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
workflow_call:
inputs:
name:
description: "Name of test set"
type: string
required: true
tags:
description: "Tags to pass as argument for nf-test --tag parameter"
type: string
required: true
runson_instance_type:
description: "RunsOn runner instance type"
type: string
required: true
runson_volume_size:
description: "RunsOn runner instance volume"
type: string
required: true

env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NFT_TAGS: "cicd"
NFT_VER: "0.9.3"
NFT_WORKDIR: "~"
NXF_ANSI_LOG: false
@@ -27,10 +29,10 @@ env:

jobs:
nf-test-changes:
name: nf-test-changes
name: nf-test-changes (${{ inputs.name }})
runs-on: # use self-hosted runners
- runs-on=${{ github.run_id }}-nf-test-changes
- runner=4cpu-linux-x64
- runner=1cpu-linux-x64
outputs:
shard: ${{ steps.set-shards.outputs.shard }}
total_shards: ${{ steps.set-shards.outputs.total_shards }}
@@ -52,21 +54,21 @@ jobs:
NFT_VER: ${{ env.NFT_VER }}
with:
max_shards: 7
tags: ${{ env.NFT_TAGS }}
tags: ${{ inputs.tags }}

- name: debug
run: |
echo ${{ steps.set-shards.outputs.shard }}
echo ${{ steps.set-shards.outputs.total_shards }}

nf-test:
name: "${{ matrix.profile }} | ${{ matrix.NXF_VER }} | ${{ matrix.shard }}/${{ needs.nf-test-changes.outputs.total_shards }}"
name: " ${{ inputs.name }} tests | ${{ matrix.profile }} | ${{ matrix.NXF_VER }} | ${{ matrix.shard }}/${{ needs.nf-test-changes.outputs.total_shards }}"
needs: [nf-test-changes]
if: ${{ needs.nf-test-changes.outputs.total_shards != '0' }}
runs-on: # use self-hosted runners
- runs-on=${{ github.run_id }}-nf-test
- runner=4cpu-linux-x64
- disk=large
- runner=${{ inputs.runson_instance_type }}
- volume=${{ inputs.runson_volume_size }}
strategy:
fail-fast: false
matrix:
@@ -103,7 +105,7 @@ jobs:
profile: ${{ matrix.profile }}
shard: ${{ matrix.shard }}
total_shards: ${{ env.TOTAL_SHARDS }}
tags: ${{ env.NFT_TAGS }}
tags: ${{ inputs.tags }}

- name: Report test status
if: ${{ always() }}
@@ -119,30 +121,3 @@ jobs:
exit 1
fi
fi

confirm-pass:
needs: [nf-test]
if: always()
runs-on: # use self-hosted runners
- runs-on=${{ github.run_id }}-confirm-pass
- runner=2cpu-linux-x64
steps:
- name: One or more tests failed (excluding latest-everything)
if: ${{ contains(needs.*.result, 'failure') }}
run: exit 1

- name: One or more tests cancelled
if: ${{ contains(needs.*.result, 'cancelled') }}
run: exit 1

- name: All tests ok
if: ${{ contains(needs.*.result, 'success') }}
run: exit 0

- name: debug-print
if: always()
run: |
echo "::group::DEBUG: `needs` Contents"
echo "DEBUG: toJSON(needs) = ${{ toJSON(needs) }}"
echo "DEBUG: toJSON(needs.*.result) = ${{ toJSON(needs.*.result) }}"
echo "::endgroup::"
1 change: 1 addition & 0 deletions .nf-core.yml
@@ -15,6 +15,7 @@ template:
lint:
actions_ci: false
files_exist:
- .github/workflows/nf-test.yml
- lib/Utils.groovy
- lib/WorkflowMain.groovy
- lib/WorkflowOncoanalyser.groovy
8 changes: 1 addition & 7 deletions conf/test_full.config
@@ -10,18 +10,12 @@
----------------------------------------------------------------------------------------
*/

process {
withName: 'PURPLE' {
ext.args = '-min_purity 1 -max_purity 1 -min_ploidy 2 -max_ploidy 2'
}
}

params {
config_profile_name = 'Full test profile'
config_profile_description = 'Full test dataset to check pipeline function'

// Input data for full size test
input = params.pipelines_testdata_base_path + '/samplesheet/fastq_eval.subject_a.wgts.tndna_trna.minimal.csv'
input = params.pipelines_testdata_base_path + '/samplesheet/full_size.hcc1395.wgts.tndna_trna.fastq.csv'

// Analysis config
mode = 'wgts'
68 changes: 34 additions & 34 deletions tests/default.nf.test.snap
@@ -21,29 +21,29 @@
"subject_a/amber",
"subject_a/amber/placeholder",
"subject_a/bamtools",
"subject_a/bamtools/subject_a_subject_a.normal_bamtools",
"subject_a/bamtools/subject_a_subject_a.normal_bamtools/subject_a.normal.bam_metric.coverage.tsv",
"subject_a/bamtools/subject_a_subject_a.normal_bamtools/subject_a.normal.bam_metric.flag_counts.tsv",
"subject_a/bamtools/subject_a_subject_a.normal_bamtools/subject_a.normal.bam_metric.frag_length.tsv",
"subject_a/bamtools/subject_a_subject_a.normal_bamtools/subject_a.normal.bam_metric.partition_stats.tsv",
"subject_a/bamtools/subject_a_subject_a.normal_bamtools/subject_a.normal.bam_metric.summary.tsv",
"subject_a/bamtools/subject_a_subject_a.tumor_bamtools",
"subject_a/bamtools/subject_a_subject_a.tumor_bamtools/subject_a.tumor.bam_metric.coverage.tsv",
"subject_a/bamtools/subject_a_subject_a.tumor_bamtools/subject_a.tumor.bam_metric.flag_counts.tsv",
"subject_a/bamtools/subject_a_subject_a.tumor_bamtools/subject_a.tumor.bam_metric.frag_length.tsv",
"subject_a/bamtools/subject_a_subject_a.tumor_bamtools/subject_a.tumor.bam_metric.partition_stats.tsv",
"subject_a/bamtools/subject_a_subject_a.tumor_bamtools/subject_a.tumor.bam_metric.summary.tsv",
"subject_a/bamtools/subject_a.normal",
"subject_a/bamtools/subject_a.normal/subject_a.normal.bam_metric.coverage.tsv",
"subject_a/bamtools/subject_a.normal/subject_a.normal.bam_metric.flag_counts.tsv",
"subject_a/bamtools/subject_a.normal/subject_a.normal.bam_metric.frag_length.tsv",
"subject_a/bamtools/subject_a.normal/subject_a.normal.bam_metric.partition_stats.tsv",
"subject_a/bamtools/subject_a.normal/subject_a.normal.bam_metric.summary.tsv",
"subject_a/bamtools/subject_a.tumor",
"subject_a/bamtools/subject_a.tumor/subject_a.tumor.bam_metric.coverage.tsv",
"subject_a/bamtools/subject_a.tumor/subject_a.tumor.bam_metric.flag_counts.tsv",
"subject_a/bamtools/subject_a.tumor/subject_a.tumor.bam_metric.frag_length.tsv",
"subject_a/bamtools/subject_a.tumor/subject_a.tumor.bam_metric.partition_stats.tsv",
"subject_a/bamtools/subject_a.tumor/subject_a.tumor.bam_metric.summary.tsv",
"subject_a/chord",
"subject_a/chord/subject_a.tumor.chord.mutation_contexts.tsv",
"subject_a/chord/subject_a.tumor.chord.prediction.tsv",
"subject_a/cider",
"subject_a/cider/subject_a.tumor.cider.alignment_match.tsv.gz",
"subject_a/cider/subject_a.tumor.cider.bam",
"subject_a/cider/subject_a.tumor.cider.blastn_match.tsv.gz",
"subject_a/cider/subject_a.tumor.cider.layout.gz",
"subject_a/cider/subject_a.tumor.cider.locus_stats.tsv",
"subject_a/cider/subject_a.tumor.cider.vdj.tsv.gz",
"subject_a/cider/subject_a.tumor_rna.cider.alignment_match.tsv.gz",
"subject_a/cider/subject_a.tumor_rna.cider.bam",
"subject_a/cider/subject_a.tumor_rna.cider.blastn_match.tsv.gz",
"subject_a/cider/subject_a.tumor_rna.cider.layout.gz",
"subject_a/cider/subject_a.tumor_rna.cider.locus_stats.tsv",
"subject_a/cider/subject_a.tumor_rna.cider.vdj.tsv.gz",
@@ -277,6 +277,23 @@
"subject_a/purple/subject_a.tumor.purple.somatic.vcf.gz",
"subject_a/purple/subject_a.tumor.purple.sv.germline.vcf.gz",
"subject_a/purple/subject_a.tumor.purple.sv.vcf.gz",
"subject_a/sage",
"subject_a/sage/germline",
"subject_a/sage/germline/subject_a.normal.gene.coverage.tsv",
"subject_a/sage/germline/subject_a.normal.sage.bqr.png",
"subject_a/sage/germline/subject_a.normal.sage.bqr.tsv",
"subject_a/sage/germline/subject_a.tumor.sage.bqr.png",
"subject_a/sage/germline/subject_a.tumor.sage.bqr.tsv",
"subject_a/sage/germline/subject_a.tumor.sage.germline.vcf.gz",
"subject_a/sage/germline/subject_a.tumor.sage.germline.vcf.gz.tbi",
"subject_a/sage/somatic",
"subject_a/sage/somatic/subject_a.normal.sage.bqr.png",
"subject_a/sage/somatic/subject_a.normal.sage.bqr.tsv",
"subject_a/sage/somatic/subject_a.tumor.gene.coverage.tsv",
"subject_a/sage/somatic/subject_a.tumor.sage.bqr.png",
"subject_a/sage/somatic/subject_a.tumor.sage.bqr.tsv",
"subject_a/sage/somatic/subject_a.tumor.sage.somatic.vcf.gz",
"subject_a/sage/somatic/subject_a.tumor.sage.somatic.vcf.gz.tbi",
"subject_a/sage_append",
"subject_a/sage_append/germline",
"subject_a/sage_append/germline/subject_a.normal.frag_lengths.tsv.gz",
@@ -288,23 +305,6 @@
"subject_a/sage_append/somatic/subject_a.tumor.sage.append.vcf.gz",
"subject_a/sage_append/somatic/subject_a.tumor.sage.append.vcf.gz.tbi",
"subject_a/sage_append/somatic/subject_a.tumor_query.sage.bqr.tsv",
"subject_a/sage_calling",
"subject_a/sage_calling/germline",
"subject_a/sage_calling/germline/subject_a.normal.gene.coverage.tsv",
"subject_a/sage_calling/germline/subject_a.normal.sage.bqr.png",
"subject_a/sage_calling/germline/subject_a.normal.sage.bqr.tsv",
"subject_a/sage_calling/germline/subject_a.tumor.sage.bqr.png",
"subject_a/sage_calling/germline/subject_a.tumor.sage.bqr.tsv",
"subject_a/sage_calling/germline/subject_a.tumor.sage.germline.vcf.gz",
"subject_a/sage_calling/germline/subject_a.tumor.sage.germline.vcf.gz.tbi",
"subject_a/sage_calling/somatic",
"subject_a/sage_calling/somatic/subject_a.normal.sage.bqr.png",
"subject_a/sage_calling/somatic/subject_a.normal.sage.bqr.tsv",
"subject_a/sage_calling/somatic/subject_a.tumor.gene.coverage.tsv",
"subject_a/sage_calling/somatic/subject_a.tumor.sage.bqr.png",
"subject_a/sage_calling/somatic/subject_a.tumor.sage.bqr.tsv",
"subject_a/sage_calling/somatic/subject_a.tumor.sage.somatic.vcf.gz",
"subject_a/sage_calling/somatic/subject_a.tumor.sage.somatic.vcf.gz.tbi",
"subject_a/sigs",
"subject_a/sigs/placeholder",
"subject_a/teal",
@@ -323,9 +323,9 @@
]
],
"meta": {
"nf-test": "0.9.2",
"nextflow": "25.04.6"
"nf-test": "0.9.3",
"nextflow": "25.10.2"
},
"timestamp": "2025-08-12T16:05:52.737409"
"timestamp": "2025-12-03T04:25:08.751931146"
}
}
1 change: 1 addition & 0 deletions tests/profile_test.nf.test
@@ -3,6 +3,7 @@ nextflow_pipeline {
name "Test pipeline"
script "../main.nf"
tag "pipeline"
tag "cicd.large"

profile "test"
