feat: add amber targeted panel mode
Also includes fixes for running the whole pipeline through Parabricks starting from FASTQs.
shihabdider committed Oct 31, 2024
1 parent 835e916 commit 7920518
Showing 5 changed files with 32 additions and 20 deletions.
4 changes: 2 additions & 2 deletions conf/base.config
@@ -70,8 +70,8 @@ process {
         memory = { check_max( 30.GB * task.attempt, 'memory' ) }
     }
     withName: 'PARABRICKS_FQ2BAM' {
-        cpus = { check_max( 4 * task.attempt, 'cpus' ) }
-        memory = { check_max( 64.GB * task.attempt, 'memory' ) }
+        cpus = { check_max( 24 * task.attempt, 'cpus' ) }
+        memory = { check_max( 250.GB * task.attempt, 'memory' ) }
         accelerator = { check_max( 1 * task.attempt, 'accelerator') }
     }
     withName: 'SAMTOOLS_MERGE|SAMTOOLS_INDEX' {
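The resource bump for PARABRICKS_FQ2BAM is still subject to `check_max`, the customary nf-core helper that clamps each request to the pipeline's configured ceilings (`params.max_cpus`, `params.max_memory`). A minimal sketch of that helper, assuming the standard nf-core shape (the real implementation also handles `time` and wraps everything in error handling):

```groovy
// Minimal sketch of the nf-core check_max helper (cpus and memory only).
// Returns the requested value unless it exceeds the configured ceiling.
def check_max(obj, type) {
    if (type == 'memory') {
        def max_mem = params.max_memory as nextflow.util.MemoryUnit
        return obj.compareTo(max_mem) == 1 ? max_mem : obj
    } else if (type == 'cpus') {
        return Math.min(obj as int, params.max_cpus as int)
    }
    return obj
}
```

So `250.GB * task.attempt` grows on each retry but can never exceed `params.max_memory`.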
2 changes: 1 addition & 1 deletion modules/local/amber/main.nf
@@ -11,7 +11,7 @@ process AMBER {
     tuple val(meta), path(tumor_bam), path(tumor_bai), path(normal_bam), path(normal_bai)
     val genome_ver
     path heterozygous_sites
-    path target_region_bed
+    tuple val(meta2), path(target_region_bed)

     output:
     tuple val(meta), path('amber/'), emit: amber_dir
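Wrapping the BED in a `tuple val(meta2), path(...)` follows the nf-core convention for optional file inputs: callers pass `[meta, file]` when a panel BED exists and `[meta, []]` when it does not, so the input arity never changes. A minimal sketch of how a module can gate a command-line argument on such an input (the process name and `--target-bed` flag are placeholders for illustration, not AMBER's actual interface):

```groovy
// Illustrative process only; '--target-bed' is a hypothetical flag.
process DEMO_OPTIONAL_BED {
    input:
    tuple val(meta), path(bam)
    tuple val(meta2), path(target_bed)  // receives [] when no BED is supplied

    output:
    stdout

    script:
    // An empty-list stand-in is falsy, so the flag is added only for real files.
    def bed_arg = target_bed ? "--target-bed ${target_bed}" : ''
    """
    echo "processing ${meta.id} ${bed_arg}"
    """
}
```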
3 changes: 3 additions & 0 deletions nextflow.config
@@ -61,6 +61,9 @@ params {
     // SV filtering (tumor only)
     pad_junc_filter = 1000 // Default Padding for SV Junction filtering

+    // AMBER options
+    target_bed_amber = null // pass a bed file with target regions for running AMBER on targeted sequencing sample
+
     // HetPileups options
     filter_hets = "TRUE"
     max_depth = 1000
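With the new parameter in place, a targeted run can point AMBER at its panel. A hypothetical params file for such a run (the `tools` selector and the paths are assumptions for illustration, not taken from this diff):

```groovy
// panel.params.config — hypothetical example of enabling the targeted mode
params {
    tools            = 'amber'                   // assumed tool-selection parameter
    target_bed_amber = '/data/panel_regions.bed' // hypothetical panel BED path
}
```

This would be supplied as `nextflow run ... -c panel.params.config`, or equivalently on the command line via `--target_bed_amber /data/panel_regions.bed`.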
14 changes: 11 additions & 3 deletions subworkflows/local/bam_amber/main.nf
@@ -6,8 +6,16 @@ include { AMBER } from '../../../modules/local/amber/main'
 include { MAKE_HET_SITES } from '../../../modules/local/amber/main'

 //AMBER
-genome_ver = WorkflowNfcasereports.create_value_channel(params.genome_ver_amber)
-het_sites = WorkflowNfcasereports.create_file_channel(params.het_sites_amber)
+genome_ver = WorkflowNfcasereports.create_value_channel(params.genome_ver_amber)
+het_sites = WorkflowNfcasereports.create_file_channel(params.het_sites_amber)
+if (params.target_bed_amber != null) {
+    target_bed_input = Channel.fromPath(params.target_bed_amber)
+        .map{ it -> [ [id: 'target_bed'], it ] }
+} else {
+    target_bed_input = Channel.value([id: 'target_bed'])
+        .map{ meta -> [ meta, [] ] }
+}
+

 workflow BAM_AMBER {
     take:
@@ -22,7 +30,7 @@ workflow BAM_AMBER {
         input,
         genome_ver,
         het_sites,
-        []
+        target_bed_input // [meta, target_bed]
     )

     amber_dir = amber_dir.mix(AMBER.out.amber_dir)
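One subtlety worth knowing about this wiring: `Channel.value(...)` is a value channel and can feed a process any number of times, whereas `Channel.fromPath(...)` is a queue channel whose single item is consumed once. A common variant of the same optional-input pattern (names hypothetical) makes both branches reusable value channels:

```groovy
// Sketch of the optional-file idiom with a reusable value channel.
// .first() converts the one-item queue channel into a value channel,
// so the BED can pair with every sample rather than only the first.
ch_target_bed = params.target_bed_amber
    ? Channel.fromPath(params.target_bed_amber)
        .map { bed -> [ [id: 'target_bed'], bed ] }
        .first()
    : Channel.value([ [id: 'target_bed'], [] ])
```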
29 changes: 15 additions & 14 deletions workflows/nfcasereports.nf
@@ -362,7 +362,7 @@ inputs = inputs
         ch_items.meta = ch_items.meta + [id: "${ch_items.meta.sample}-${ch_items.meta.lane}".toString()]
         def CN = params.seq_center ? "CN:${params.seq_center}\\t" : ''

-        def flowcell = flowcellLaneFromFastq(fastq_1)
+        def flowcell = flowcellLaneFromFastq(ch_items.fastq_1)
         // Don't use a random element for ID, it breaks resuming
         def read_group = "\"@RG\\tID:${flowcell}.${ch_items.meta.sample}.${ch_items.meta.lane}\\t${CN}PU:${ch_items.meta.lane}\\tSM:${ch_items.meta.patient}_${ch_items.meta.sample}\\tLB:${ch_items.meta.sample}\\tDS:${params.fasta}\\tPL:${params.seq_platform}\""

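For reference, with hypothetical values substituted into the template above (flowcell HWLTKDRXX, patient P1, sample S1, lane L001, center MSKCC, platform ILLUMINA, reference /refs/genome.fasta), the read group string passed to the aligner renders as:

```
"@RG\tID:HWLTKDRXX.S1.L001\tCN:MSKCC\tPU:L001\tSM:P1_S1\tLB:S1\tDS:/refs/genome.fasta\tPL:ILLUMINA"
```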
@@ -872,13 +872,13 @@ workflow NFCASEREPORTS {

     // Post-alignment QC
     if (tools_used.contains("all") || tools_used.contains("bamqc")) {
-        bam_qc_inputs = inputs.map { it -> [it.meta.id] }
-        bam_qc_calling = alignment_bams_final
-            .join(bam_qc_inputs)
-            .map { it -> [ it[1], it[2], it[3] ] } // meta, bam, bai
+        bam_qc_inputs = inputs.map { it -> [it.meta.sample] }
+        bam_qc_calling = bam_qc_inputs
+            .join(alignment_bams_final)
+            .map { id, meta, bam, bai -> [ meta, bam, bai ] }

         // omit meta since it is not used in the BAM_QC
-        dict_path = dict.map{ meta, dict -> dict }
+        dict_path = dict.map{ meta, dict -> [dict] }
         BAM_QC(bam_qc_calling, dict_path)

         // Gather QC
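The recurring refactor in this file switches the join key from `meta.id` to `meta.sample` and places the key channel on the left of the `join`. A toy sketch of that pattern, with made-up sample data:

```groovy
// Toy illustration of joining a key channel against [key, meta, bam, bai] tuples.
ch_keys = Channel.of(['S1'], ['S2'])  // one-element tuples: [sample]
ch_bams = Channel.of(
    ['S1', [id: 'S1', sample: 'S1'], 'S1.bam', 'S1.bam.bai'],
    ['S2', [id: 'S2', sample: 'S2'], 'S2.bam', 'S2.bam.bai'])

ch_keys
    .join(ch_bams)                                        // match on the first element
    .map { sample, meta, bam, bai -> [ meta, bam, bai ] } // drop the join key
    .view()
```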
@@ -891,7 +891,7 @@
if (tools_used.contains("all") || tools_used.contains("gridss")) {

// Filter out bams for which SV calling has already been done
bam_sv_inputs = inputs.filter { it.vcf.isEmpty() }.map { it -> [it.meta.id] }
bam_sv_inputs = inputs.filter { it.vcf.isEmpty() }.map { it -> [it.meta.sample] }
bam_sv_calling = alignment_bams_final
.join(bam_sv_inputs)
.map { it -> [ it[1], it[2], it[3] ] } // meta, bam, bai
@@ -970,7 +970,8 @@
     // AMBER
     // ##############################
     if (tools_used.contains("all") || tools_used.contains("amber")) {
-        bam_amber_inputs = inputs.filter { it.hets.isEmpty() && it.amber_dir.isEmpty() }.map { it -> [it.meta.id] }
+        bam_amber_inputs = inputs.filter { it.hets.isEmpty() && it.amber_dir.isEmpty() }.map { it -> [it.meta.sample] }
+        alignment_bams_final = alignment_bams_final
         bam_amber_calling = alignment_bams_final
             .join(bam_amber_inputs)
             .map{ it -> [ it[1], it[2], it[3] ] } // meta, bam, bai
@@ -998,14 +999,14 @@
         }

         // All tumor samples
-        bam_amber_tumor_for_crossing = bam_amber_status.tumor.map{ meta, bam, bai -> [ meta.patient, meta + [id: meta.sample], bam, bai ] }
+        bam_amber_tumor_for_crossing = bam_amber_status.tumor.map{ meta, bam, bai -> [ meta.patient, meta, bam, bai ] }

         if (params.tumor_only) {
             // add empty arrays to stand-in for normals
             bam_amber_pair = bam_amber_status.tumor.map{ meta, bam, bai -> [ meta + [tumor_id: meta.sample], bam, bai, [], [] ] }
         } else {
             // All normal samples
-            bam_amber_normal_for_crossing = bam_amber_status.normal.map{ meta, bam, bai -> [ meta.patient, meta + [id: meta.sample], bam, bai ] }
+            bam_amber_normal_for_crossing = bam_amber_status.normal.map{ meta, bam, bai -> [ meta.patient, meta, bam, bai ] }
             // Crossing the normal and tumor samples to create tumor and normal pairs
             bam_amber_pair = bam_amber_normal_for_crossing.cross(bam_amber_tumor_for_crossing)
                 .map { normal, tumor ->
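`cross` pairs every tumor with every normal that shares the leading patient key. A toy sketch of the pairing (sample data invented; the `.map` body is a simplified stand-in for the one elided above):

```groovy
// Toy tumor/normal pairing on a shared patient key.
normals = Channel.of(['P1', [patient: 'P1', sample: 'P1_N'], 'n.bam', 'n.bai'])
tumors  = Channel.of(['P1', [patient: 'P1', sample: 'P1_T'], 't.bam', 't.bai'])

normals.cross(tumors)            // emits [normalTuple, tumorTuple] per match
    .map { normal, tumor ->
        def meta = tumor[1] + [normal_id: normal[1].sample, tumor_id: tumor[1].sample]
        [ meta, tumor[2], tumor[3], normal[2], normal[3] ]  // meta, t_bam, t_bai, n_bam, n_bai
    }
    .view()
```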
@@ -1041,7 +1042,7 @@
     // FRAGCOUNTER
     // ##############################
     if (tools_used.contains("all") || tools_used.contains("fragcounter")) {
-        bam_fragcounter_inputs = inputs.filter { it.frag_cov.isEmpty() }.map { it -> [it.meta.id] }
+        bam_fragcounter_inputs = inputs.filter { it.frag_cov.isEmpty() }.map { it -> [it.meta.sample] }
         bam_fragcounter_calling = alignment_bams_final
             .join(bam_fragcounter_inputs)
             .map{ it -> [ it[1], it[2], it[3] ] } // meta, bam, bai
@@ -1194,11 +1195,11 @@
         if (params.tumor_only) {
             bam_snv_inputs = inputs
                 .filter { it.snv_somatic_vcf.isEmpty() }
-                .map { it -> [it.meta.id] }
+                .map { it -> [it.meta.sample] }
         } else {
             bam_snv_inputs = inputs
                 .filter { it.snv_somatic_vcf.isEmpty() || it.snv_germline_vcf.isEmpty() }
-                .map { it -> [it.meta.id] }
+                .map { it -> [it.meta.sample] }
         }

         bam_snv_calling = alignment_bams_final
@@ -1350,7 +1351,7 @@
     // ##############################
     if (tools_used.contains("all") || tools_used.contains("purple")) {
         // this channel is for merging with alignment_bams_final
-        purple_inputs = inputs.filter { it.ploidy.isEmpty() }.map { it -> [it.meta.id] }
+        purple_inputs = inputs.filter { it.ploidy.isEmpty() }.map { it -> [it.meta.sample] }
         // need a channel with patient and meta for merging with rest
         purple_inputs_for_merge = inputs.filter { it.ploidy.isEmpty() }.map { it -> [it.meta.patient, it.meta] }


