Muennighoff committed
Commit
d484f8f
1 Parent(s): 62e58c1

Create sbatch_2b855bc4perplexity.sh

Files changed (1)
  1. sbatch_2b855bc4perplexity.sh +159 -0
sbatch_2b855bc4perplexity.sh ADDED
@@ -0,0 +1,159 @@
+ #!/bin/bash
+ #SBATCH --nodes=32
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=32
+ #SBATCH --mem=256G
+ #SBATCH -p standard-g
+ #SBATCH -t 48:00:00
+ #SBATCH --gpus-per-node=mi250:8
+ #SBATCH --exclusive=user
+ #SBATCH --hint=nomultithread
+ #SBATCH --account=project_462000119
+ #SBATCH -o logs/%j.out
+ #SBATCH -e logs/%j.err
+
+
+ VARIANT=2b855bc4perplexity
+
+ # if run without sbatch, invoke here
+ if [ -z $SLURM_JOB_ID ]; then
+     mkdir -p logs
+     sbatch "$0"
+     exit
+ fi
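+ # When this file is invoked directly (bash sbatch_2b855bc4perplexity.sh), SLURM_JOB_ID
+ # is unset, so the block above creates logs/, submits the same file to the queue via
+ # sbatch, and exits; the sbatch-launched copy skips the block and runs the job below.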
+
+ set -euo pipefail
+
+ # symlink logs/latest.out and logs/latest.err
+ ln -f -s $SLURM_JOB_ID.out logs/latest.out
+ ln -f -s $SLURM_JOB_ID.err logs/latest.err
+
+ KILL_SWITCH_PATH=kill-switch-$VARIANT
+ CHECKPOINT_PATH=checkpoints_$VARIANT
+ TENSORBOARD_PATH=tensorboard_$VARIANT
+
+ # Data
+ VOCAB_FILE="gpt2/vocab.json"
+ MERGE_FILE="gpt2/merges.txt"
+ DATA_PATH="/scratch/project_462000119/data/c4perplexity/gpt2tok_perplexity_text_document"
+
+ PP_SIZE=1
+ TP_SIZE=1
+
+ MICRO_BATCH_SIZE=2
+ GRADIENT_ACCUMULATION_STEPS=1
+ WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
+ GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))
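+ # With 32 nodes * 8 GPUs per node, WORLD_SIZE = 256, so the global batch size works
+ # out to 2 * 256 * 1 = 512 sequences, i.e. 512 * 2048 = ~1.05M tokens per step.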
+
+ # Model parameters
+ source model_params.sh
+ MODEL_PARAM=("${PARAM_2980M[@]}")
+ NHIDDEN=${MODEL_PARAM[0]}
+ FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
+ KV_SIZE=${MODEL_PARAM[2]}
+ NHEADS=${MODEL_PARAM[3]}
+ NLAYERS=${MODEL_PARAM[4]}
+ SEQ_LEN=2048
+
+ echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"
+
+ SAVE_INTERVAL=10000
+
+ # Tokens: 55000000000
+ # -> Samples: 26855469
+ TRAIN_SAMPLES=26_855_469
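+ # 55,000,000,000 tokens / 2048 tokens per sequence = 26,855,468.75 -> 26,855,469 samples.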
+
+ OPTIMIZER_ARGS=" \
+     --optimizer adam \
+     --adam-beta1 0.9 \
+     --adam-beta2 0.999 \
+     --adam-eps 1e-8 \
+     --lr 2e-4 \
+     --min-lr 2e-5 \
+     --lr-decay-style cosine \
+     --lr-decay-samples $TRAIN_SAMPLES \
+     --lr-warmup-samples 268_555 \
+     --clip-grad 1.0 \
+     --weight-decay 1e-1 \
+     "
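+ # Warmup spans 268,555 samples (roughly 1% of TRAIN_SAMPLES); the cosine schedule then
+ # decays the learning rate from 2e-4 to the 2e-5 floor over the full 26.9M samples.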
+
+ GPT_ARGS=" \
+     --num-layers $NLAYERS \
+     --hidden-size $NHIDDEN \
+     --num-attention-heads $NHEADS \
+     --kv-channels $KV_SIZE \
+     --ffn-hidden-size $FFN_HIDDEN_SIZE \
+     --seq-length $SEQ_LEN \
+     --max-position-embeddings $SEQ_LEN \
+     --micro-batch-size $MICRO_BATCH_SIZE \
+     --global-batch-size $GLOBAL_BATCH_SIZE \
+     --train-samples $TRAIN_SAMPLES \
+     --vocab-file $VOCAB_FILE \
+     --merge-file $MERGE_FILE \
+     --clip-grad 1.0 \
+     --kill-switch-path $KILL_SWITCH_PATH \
+     --bf16 \
+     $OPTIMIZER_ARGS \
+     "
+
+ OUTPUT_ARGS=" \
+     --log-interval 10 \
+     --save-interval $SAVE_INTERVAL \
+     --eval-interval 1000 \
+     --eval-iters 1 \
+     --tensorboard-dir $TENSORBOARD_PATH \
+     --tensorboard-queue-size 5 \
+     --log-timers-to-tensorboard \
+     --log-batch-size-to-tensorboard \
+     --log-validation-ppl-to-tensorboard \
+     "
+
+ ZERO_STAGE=0
+
+ mkdir -p ds_configs
+ DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"
+
+ cat <<EOF > $DS_CONFIG_PATH
+ {
+     "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
+     "train_batch_size": $GLOBAL_BATCH_SIZE,
+     "gradient_clipping": 1.0,
+     "zero_optimization": {
+         "stage": $ZERO_STAGE
+     },
+     "bf16": {
+         "enabled": true
+     },
+     "steps_per_print": 2000,
+     "wall_clock_breakdown": false
+ }
+ EOF
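+ # The DeepSpeed config reuses the same shell variables as the Megatron flags above,
+ # so the micro/global batch sizes, gradient clipping, bf16, and ZeRO stage stay in sync.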
+
+ DEEPSPEED_ARGS=" \
+     --deepspeed \
+     --deepspeed_config $DS_CONFIG_PATH \
+     --zero-stage $ZERO_STAGE \
+     "
+
+ CMD=" \
+     Megatron-DeepSpeed/pretrain_gpt.py \
+     --tensor-model-parallel-size $TP_SIZE \
+     --pipeline-model-parallel-size $PP_SIZE \
+     $GPT_ARGS \
+     $OUTPUT_ARGS \
+     --save $CHECKPOINT_PATH \
+     --load $CHECKPOINT_PATH \
+     --data-path $DATA_PATH \
+     --data-impl mmap \
+     --split 949,50,1 \
+     $DEEPSPEED_ARGS \
+     "
+
+ echo $CMD
+
+ echo "START $SLURM_JOBID: $(date)"
+
+ # bash launch_srun.sh $CMD
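+ # srun starts one task per node (32 in total); --label prefixes each task's output
+ # with its rank. launch.sh (not included in this commit) is expected to spawn the
+ # per-GPU training processes on each node.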
+ srun --label launch.sh $CMD
+
+ echo "END $SLURM_JOBID: $(date)"