#
# Example slurm.conf file. Please run configurator.html
# (in doc/html) to build a configuration file customized
# for your environment.
#
#
# slurm.conf file generated by configurator.html.
#
# See the slurm.conf man page for more information.
#
ClusterName=SLURM_CLUSTER
#SlurmdUser=slurm
SlurmctldPort=6817
SlurmdPort=6818
AuthType=auth/munge
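# Quick sanity check that munge authentication works on a node (assumes the
# munge client tools are installed):
#   munge -n | unmunge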
StateSaveLocation=/cm/shared/apps/slurm/var/cm/statesave
SlurmdSpoolDir=/cm/local/apps/slurm/var/spool
SwitchType=switch/none
MpiDefault=none
SlurmctldPidFile=/var/run/slurm/slurmctld.pid
SlurmdPidFile=/var/run/slurm/slurmd.pid
ProctrackType=proctrack/cgroup
CacheGroups=0
ReturnToService=2
#PrologSlurmctld=/cm/local/apps/cmd/scripts/prolog-prejob
#Prolog=/cm/shared/apps/slurm/var/etc/slurm_prolog
#Epilog=/cm/shared/apps/slurm/var/etc/slurm_epilog_clean
#TaskProlog=/cm/shared/apps/slurm/var/etc/slurm_task_prolog
TaskPlugin=task/cgroup
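# Both cgroup-based plugins above (proctrack/cgroup and task/cgroup) read
# their settings from a separate cgroup.conf alongside this file.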
SlurmctldTimeout=300
SlurmdTimeout=200
InactiveLimit=0
MinJobAge=300
KillWait=30
MessageTimeout=20
HealthCheckInterval=60
CompleteWait=600
AcctGatherNodeFreq=30
GetEnvTimeout=10
ExtSensorsFreq=30
UnkillableStepTimeout=300
MaxTasksPerNode=2048
BatchStartTimeout=30
OverTimeLimit=15
ResvOverRun=15
#FastSchedule=1
SelectType=select/cons_res
SelectTypeParameters=CR_CPU
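# With select/cons_res and CR_CPU, individual CPUs are the consumable
# resource: nodes are shared between jobs down to the CPU level, and memory
# is not tracked as a consumable resource.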
SallocDefaultCommand="$SHELL"
PriorityType=priority/multifactor
PriorityDecayHalfLife=7-0
PriorityFavorSmall=NO
PriorityMaxAge=14-0
#PriorityWeightAge=1000
PriorityWeightAge=100
PriorityWeightFairshare=1000000
PriorityWeightJobSize=0
PriorityWeightPartition=2000
PriorityCalcPeriod=1
PriorityWeightQOS=5000
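# Sketch of how priority/multifactor combines the weights above (each factor
# is a float in [0,1] per the Slurm docs; terms whose weights are zero or
# unset here are omitted):
#   priority = 100*age + 1000000*fairshare + 2000*partition + 5000*qos
# so fair-share dominates, with QOS and partition as tie-breakers and a small
# age bonus that saturates at PriorityMaxAge (14 days).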
SlurmctldLogFile=/var/tmp/slurmctld
SlurmdLogFile=/var/log/slurmd
JobCompType=jobcomp/none
AccountingStorageEnforce=associations,limits,qos
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageUser=slurm
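# With AccountingStorageEnforce=associations,limits,qos, jobs are rejected
# unless the submitting user has an association in slurmdbd; one would be
# created along these lines (account and user names are only illustrative):
#   sacctmgr add account mylab
#   sacctmgr add user alice account=mylab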
EnforcePartLimits=YES
VSizeFactor=95
WaitTime=0
MaxJobCount=50000
MaxArraySize=100000
#DefMemPerCPU=5333
SchedulerParameters=default_queue_depth=500
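# default_queue_depth caps how many queued jobs the main scheduling loop
# examines per cycle (500 here, versus a default of 100).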
JobAcctGatherType=jobacct_gather/linux
# This section of this file was automatically generated by cmd. Do not edit manually!
# BEGIN AUTOGENERATED SECTION -- DO NOT REMOVE
# Scheduler
SchedulerType=sched/builtin
# Master nodes
ControlMachine=isglcdcpap001
ControlAddr=isglcdcpap001
AccountingStorageHost=isglcdcpap001
# Nodes
NodeName=isglcdctap001 Procs=15
NodeName=isgnode[001-006] Procs=48
# Partitions
PartitionName=defq Default=YES MinNodes=1 AllowGroups=ALL DisableRootJobs=NO RootOnly=NO Hidden=NO GraceTime=0 PreemptMode=OFF ReqResv=NO AllowAccounts=ALL AllowQos=ALL LLN=YES ExclusiveUser=NO PriorityJobFactor=1 PriorityTier=1 OverSubscribe=YES:48 State=UP Nodes=isgnode[001-006]
PartitionName=testq Default=NO MinNodes=1 AllowGroups=ALL DisableRootJobs=NO RootOnly=NO Hidden=NO GraceTime=0 PreemptMode=OFF ReqResv=NO AllowAccounts=ALL AllowQos=ALL LLN=YES ExclusiveUser=NO PriorityJobFactor=1 PriorityTier=1 OverSubscribe=YES:16 State=UP Nodes=isglcdctap001
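# Example submissions against these partitions (job script name is
# illustrative):
#   sbatch -p defq -N 1 -n 4 myjob.sh
#   sbatch -p testq -n 1 myjob.sh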
# Generic resources types
GresTypes=gpu,mic
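# Note: no node definition above declares any Gres, so these types stay inert
# until a node line adds e.g. Gres=gpu:2 (illustrative) with a matching
# gres.conf entry on that node.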
# Epilog/Prolog parameters
PrologSlurmctld=/cm/local/apps/cmd/scripts/prolog-prejob
Prolog=/cm/local/apps/cmd/scripts/prolog
Epilog=/cm/local/apps/cmd/scripts/epilog
# Fast Schedule option
FastSchedule=1
# Power Saving
SuspendTime=-1 # this disables power saving
SuspendTimeout=30
ResumeTimeout=60
SuspendProgram=/cm/local/apps/cluster-tools/wlm/scripts/slurmpoweroff
ResumeProgram=/cm/local/apps/cluster-tools/wlm/scripts/slurmpoweron
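# With SuspendTime=-1 the suspend/resume programs above are never invoked;
# they presumably remain here for if power saving is re-enabled.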
# END AUTOGENERATED SECTION -- DO NOT REMOVE
#SelectType=select/cons_res
#SelectTypeParameters=CR_CPU