-
Notifications
You must be signed in to change notification settings - Fork 0
/
lm_eval_mistral_7b_all.sh
75 lines (64 loc) · 1.99 KB
/
lm_eval_mistral_7b_all.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
#!/bin/bash
# SLURM batch job: zero-shot evaluation of Mistral-7B-v0.1 on a suite of
# Turkish benchmarks via lm-evaluation-harness (see run section below).
# Submit with: sbatch lm_eval_mistral_7b_all.sh
#SBATCH --job-name=lm-eval-mistral-7b-all
#SBATCH --output=lm-eval-mistral-7b-all-%j.out
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=8
#SBATCH --mem=64G
#SBATCH --partition=ai
#SBATCH --qos=ai
#SBATCH --account=ai
# for v100 and a6000 constraint and gres should be both set
# Alternative GPU selections kept disabled below; exactly one --gres line
# should be active at a time. Current choice: 4x NVIDIA A40.
# #SBATCH --gres=gpu:tesla_t4:1
# #SBATCH --gres=gpu:tesla_v100:1
#SBATCH --gres=gpu:nvidia_a40:4
# #SBATCH --gres=gpu:rtx_a6000:1
#SBATCH --constraint=ai
#SBATCH --time=24:0:0
#SBATCH --mail-type=END
#SBATCH [email protected]
# Modules
###############################################
# Load the toolchain required by the evaluation run (git, CUDA stack,
# compiler, and Anaconda for the conda environment activated later).
printf '%s\n' "======================="
printf '%s\n' "Loading Modules..."
for mod in \
  git/2.25.0 \
  cuda/11.8.0 \
  cudnn/8.9.5/cuda-11.x \
  gcc/9.3.0 \
  anaconda/3.21.05
do
  module load "$mod"
done
# Set stack size to unlimited
# Raise stack and locked-memory limits, then dump all limits for the job log.
printf '%s\n' "Setting stack size to unlimited..."
ulimit -s unlimited
ulimit -l unlimited
ulimit -a
printf '\n'
# Environment Prep
###############################################
# Activate the conda environment and export distributed-run settings.
printf '%s\n' "======================="
printf '%s\n' "Preparing environment..."
# conda activate lm-eval
# NOTE(review): legacy `source activate` — presumably required by the loaded
# anaconda module; confirm before switching to `conda activate`.
source activate lm-eval2
printf '%s\n' "======================="
# prevent hanging
# One process per GPU across the 4 visible A40s; restrict NCCL P2P to NVLink.
export WORLD_SIZE=4
export CUDA_VISIBLE_DEVICES="0,1,2,3"
export NCCL_P2P_LEVEL="NVL"
nvidia-smi
# Run
###############################################
# Launch lm-evaluation-harness over the Turkish task suite with 0-shot
# prompting, writing per-sample logs and cached requests.
echo "Run Script!!!"
echo "======================="
# Alternative launchers / options, kept for reference (do NOT append them
# after a trailing backslash — a dangling continuation splices the next
# line into the command):
# python -m lm_eval --model hf ...
# --model_args parallelize=True
# --device cuda:0
# --limit 40   (or --limit 10) to smoke-test on a few samples
if ! accelerate launch -m lm_eval --model hf \
  --model_args pretrained=/kuacc/users/muguney/hpc_run/lm-models/Mistral-7B-v0.1,dtype="bfloat16",max_length=4096 \
  --tasks belebele_tr,exams_tr,gecturk_generation,ironytr,mkqa_tr,mlsum_tr,news_cat,nli_tr,offenseval_tr,sts_tr,tquad,trclaim19,turkish_plu_prompt,tr-wikihow-summ,wiki_lingua_tr,wmt-tr-en-prompt,xcopa_tr,xfact_tr,xlsum_tr,xquad_tr \
  --batch_size 1 --write_out --log_samples \
  --num_fewshot 0 \
  --output_path task_outs/mistral_7b \
  --use_cache mistral7b \
  --verbosity DEBUG
then
  # Fail the SLURM job visibly instead of printing "Done!!" after a crash.
  echo "lm_eval run failed" >&2
  exit 1
fi
echo "======================="
echo "Done!!"