bash_script_1.sh
#!/bin/bash

# Configuration
TOTAL_NODES=2
GPUS_PER_NODE=1
CPUS_PER_TASK=4
MASTER_ADDR="147.189.195.2"  # EDIT: IP of the first VM
MASTER_PORT=6000
NODE_RANK=0                  # EDIT: Set to 0 for first VM, 1 for second VM

# EDIT: Path to your Python script
PROGRAM_PATH="/home/ubuntu/multi-node-gpu/torch-distributed-gpu-test.py"

echo "START TIME: $(date)"

# Logging
LOG_PATH="main_log_node_${NODE_RANK}.txt"

# Function to run the distributed training
run_distributed_training() {
    python -u -m torch.distributed.run \
        --nproc_per_node $GPUS_PER_NODE \
        --nnodes $TOTAL_NODES \
        --node_rank $NODE_RANK \
        --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT \
        --rdzv_backend c10d \
        --max_restarts 0 \
        --role $(hostname -s | tr -dc '0-9'): \
        --tee 3 \
        "$PROGRAM_PATH"
}
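
# Note (assumption, not stated in this script): on PyTorch >= 1.10 the same
# launcher is also exposed as the `torchrun` console script, so the command
# above could equivalently begin with
#   torchrun --nproc_per_node $GPUS_PER_NODE --nnodes $TOTAL_NODES ... "$PROGRAM_PATH"
# with the remaining flags unchanged.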

# Run the training and log output
run_distributed_training 2>&1 | tee -a "$LOG_PATH"

echo "END TIME: $(date)"