#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH -p small-g
#SBATCH -t 24:00:00
#SBATCH --gpus-per-node=mi250:1
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000241
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err
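# Driver script: writes one eval_$i.slurm job script per task in CONFIGS below and submits it with sbatch.
# Typical usage (script filename assumed): sbatch submit_bigbench_evals.slurm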
source /pfs/lustrep2/scratch/project_462000241/muennighoff/lmevallatest/venv/bin/activate
cd /pfs/lustrep2/scratch/project_462000241/muennighoff/
export HF_DATASETS_OFFLINE=1
export HF_DATASETS_CACHE=/pfs/lustrep2/scratch/project_462000185/muennighoff/ds_cache
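# Force HF datasets to use the pre-populated local cache only (no network downloads)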
# Evaluation tasks to run
CONFIGS=(
bigbench_analogies
bigbench_arithmetic_1_digit_addition
bigbench_arithmetic_1_digit_division
bigbench_arithmetic_1_digit_multiplication
bigbench_arithmetic_1_digit_subtraction
bigbench_arithmetic_2_digit_addition
bigbench_arithmetic_2_digit_division
bigbench_arithmetic_2_digit_multiplication
bigbench_arithmetic_2_digit_subtraction
bigbench_arithmetic_3_digit_addition
bigbench_arithmetic_3_digit_division
bigbench_arithmetic_3_digit_multiplication
bigbench_arithmetic_3_digit_subtraction
bigbench_arithmetic_4_digit_addition
bigbench_arithmetic_4_digit_division
bigbench_arithmetic_4_digit_multiplication
bigbench_arithmetic_4_digit_subtraction
bigbench_arithmetic_5_digit_addition
bigbench_arithmetic_5_digit_division
bigbench_arithmetic_5_digit_multiplication
bigbench_arithmetic_5_digit_subtraction
bigbench_cause_and_effect_one_sentence
bigbench_cause_and_effect_one_sentence_no_prompt
bigbench_cause_and_effect_two_sentences
bigbench_emotions
bigbench_empirical_judgments
bigbench_general_knowledge
bigbench_hhh_alignment_harmless
bigbench_hhh_alignment_helpful
bigbench_hhh_alignment_honest
bigbench_hhh_alignment_other
bigbench_misconceptions
bigbench_paraphrase
bigbench_sentence_ambiguity
bigbench_similarities_abstraction
)
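# Alternative task subsets below (CONFIGSZ, CONFIGSX) are not referenced by the submission loop; only CONFIGS is used.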
CONFIGSZ=(
bigbench_intent_recognition
)
CONFIGSX=(
bigbench_analogies
)
#CONFIGS=(
#bigbench_similarities_abstraction
#)
CONFIGSX=(
bigbench_cause_and_effect_one_sentence
bigbench_cause_and_effect_one_sentence_no_prompt
bigbench_cause_and_effect_two_sentences
bigbench_emotions
bigbench_empirical_judgments
bigbench_general_knowledge
bigbench_hhh_alignment_harmless
bigbench_hhh_alignment_helpful
bigbench_hhh_alignment_honest
bigbench_hhh_alignment_other
)
# Generate one job script per task in CONFIGS and submit it
for ((i=0; i<${#CONFIGS[@]}; i++)); do
eval_script="./eval_$i.slurm"
OUTPUT_PATH=results/bloom_176B_${CONFIGS[$i]}_1shot.json
cat <<EOT > "$eval_script"
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000241
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err
source /pfs/lustrep2/scratch/project_462000241/muennighoff/lmevallatest/venv/bin/activate
cd /pfs/lustrep2/scratch/project_462000241/muennighoff/lmevallatest/lm-evaluation-harness
export HF_DATASETS_OFFLINE=1
export HF_DATASETS_CACHE=/pfs/lustrep2/scratch/project_462000185/muennighoff/ds_cache
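# Evaluate a single BIG-bench task with the lm-evaluation-harness (1-shot), writing results to JSON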
python main.py \
--model hf-causal-experimental \
--model_args pretrained=/pfs/lustrep2/scratch/project_462000185/muennighoff/bloom \
--tasks ${CONFIGS[$i]} \
--device cuda:0 \
--output_path $OUTPUT_PATH \
--no_cache --num_fewshot 1
echo "END TIME: $(date)"
EOT
# Submit the job
sbatch "$eval_script"
# Optionally sleep briefly between submissions to stay under the job submission rate limit
# sleep 0.1
done
echo "END TIME: $(date)"