/*
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Nextflow config file for St. Jude Children's Research Hospital's High Performance Research Cluster (HPCF)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Author: Haidong Yi
Mail: [email protected]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
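/*
    Usage sketch (assumes this file is registered as the `stjude` profile in
    nf-core/configs; substitute your own pipeline name):

        nextflow run nf-core/<pipeline> -profile stjude
*/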
params {
    config_profile_contact = "Haidong Yi ([email protected])"
    config_profile_description = "St. Jude Children's Research Hospital HPC cluster (HPCF) profile"
    config_profile_url = "https://www.stjude.org/"
    max_cpus = 64
    max_memory = 1024.GB
    max_time = 240.h
}
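
// `max_cpus`, `max_memory`, and `max_time` above are the legacy nf-core resource
// caps (consumed by the `check_max` helper in older pipeline templates); they are
// kept alongside `resourceLimits` below for pipelines that still rely on them.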
process {
    // Use the `resourceLimits` directive to cap per-task resources.
    // Note: `resourceLimits` is a new feature introduced in version 24.04.0,
    // so Nextflow must be upgraded to 24.04.0 or later to use it.
    // See: https://github.com/nf-core/tools/issues/2923
    resourceLimits = [
        memory: 1024.GB,
        cpus: 32,
        time: 240.h
    ]
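    // Task requests that exceed these limits are automatically reduced to the
    // limit instead of failing at submission time.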
    executor = 'lsf'
    scratch = false
    cache = 'lenient'
    maxRetries = 3
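    // Retry on exit codes that typically indicate resource-related kills
    // (137 = SIGKILL, often out-of-memory; 143 = SIGTERM; 140 is commonly
    // reported when LSF enforces a run limit); on any other failure, let
    // running tasks finish and then stop.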
    errorStrategy = { task.exitStatus in [143, 137, 134, 139, 140] ? 'retry' : 'finish' }
    afterScript = 'sleep 10' // avoid spurious failures when `storeDir` outputs are not yet visible on the shared filesystem
    beforeScript = """
    module load singularity/4.1.1
    export SINGULARITY_TMPDIR="${System.getenv('TMPDIR')}" # 'TMPDIR' is set by LSF once the job starts.
    """
    // Select a queue based on the task's resource requests.
    // For urgent runs, change the default queue from 'standard' to 'priority'.
    queue = {
        if (task.accelerator) {
            'gpu'
        }
        else if (task.time < 30.min) {
            'short'
        }
        else if (task.memory > 512.GB) {
            'large_mem'
        }
        else {
            'standard'
        }
    }
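    // For example, a 20-minute CPU-only task is sent to 'short', a task
    // requesting 600.GB of memory to 'large_mem', and everything else to 'standard'.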
    // clusterOptions for GPU tasks.
    // NOTE: each job gets exclusive use of the GPUs allocated to it.
    clusterOptions = { task.accelerator ? "-gpu \"num=${task.accelerator.request}/host:mode=shared:j_exclusive=yes\"" : null }
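    // In the `-gpu` string above, "num=N/host" asks LSF for N GPUs on a single
    // host, "j_exclusive=yes" keeps other jobs off those GPUs, and "mode=shared"
    // lets processes within this job share them.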
}
singularity {
    // Make the tmp dir and the visible GPU devices available inside containers.
    envWhitelist = "SINGULARITY_TMPDIR,TMPDIR,CUDA_VISIBLE_DEVICES"
    enabled = true
    autoMounts = true
    runOptions = '-p -B "$TMPDIR"'
    pullTimeout = "3 hours"
}
// Remove intermediate files from the work directory when a run completes successfully.
cleanup = true
executor {
    name = 'lsf'
    queueSize = 100
    perTaskReserve = true
    perJobMemLimit = false
    submitRateLimit = "10/1sec"
    exitReadTimeout = "5 min"
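    // LSF interprets square brackets in job names as job-array indices, so
    // sanitize task names by replacing brackets and spaces with safe characters.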
    jobName = {
        task.name
            .replace("[", "(")
            .replace("]", ")")
            .replace(" ", "_")
    }
}