Training in progress, epoch 0
- config.json +30 -0
- eval_job_output.txt +42 -0
- logs/events.out.tfevents.1715123789.sphinx2 +3 -0
- model.safetensors +3 -0
- train_job_output.txt +0 -0
- training_args.bin +3 -0
config.json
ADDED
@@ -0,0 +1,30 @@
+{
+  "_name_or_path": "EleutherAI/pythia-70m",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 512,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 6,
+  "rope_scaling": null,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.39.3",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
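
The file above is a standard transformers GPTNeoX configuration for the Pythia-70m architecture (6 layers, hidden size 512, 8 attention heads, 50304-token vocabulary). As a minimal sketch, not part of this commit, the committed config and weights could be loaded with transformers roughly as follows; the local directory name "pythia-70m_xnli_en" is an assumption based on the checkpoint paths used by this repo, and the tokenizer is taken from the base model because no tokenizer files are included in this commit:

# Minimal sketch (assumed local checkpoint directory "pythia-70m_xnli_en").
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

config = AutoConfig.from_pretrained("pythia-70m_xnli_en")            # reads the config.json shown above
model = AutoModelForCausalLM.from_pretrained("pythia-70m_xnli_en")   # reads model.safetensors from this commit
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m")   # tokenizer is not part of this commit

print(config.model_type, config.hidden_size, config.num_hidden_layers)   # gpt_neox 512 6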
eval_job_output.txt
ADDED
@@ -0,0 +1,42 @@
+slurm submission log: 2024-05-07 15:08:34.265333
+created following sbatch script:
+
+###############################
+
+#!/bin/bash
+
+#SBATCH --account=nlp
+#SBATCH --cpus-per-task=16
+#SBATCH --dependency=afterok:7543205
+#SBATCH --gres=gpu:1
+#SBATCH --job-name=tthrush-job-3132094
+#SBATCH --mem=60G
+#SBATCH --nodelist=sphinx2
+#SBATCH --open-mode=append
+#SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms/pythia-70m_xnli_en/eval_job_output.txt
+#SBATCH --partition=sphinx
+#SBATCH --time=14-0
+
+# activate your desired anaconda environment
+. /nlp/scr/tthrush/miniconda3/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection
+
+# cd to working directory
+cd .
+
+# launch commands
+srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms/pythia-70m_xnli_en,revision=main,dtype=float16,trust_remote_code=True --tasks xnli_en,xnli_fr,sciq,piqa,lambada,arc_easy --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/llms/pythia-70m_xnli_en/perf'
+
+###############################
+
+submission to slurm complete!
+
+
+###############################
+slurm submission output
+
+Submitted batch job 7543206
+
+
+
+###############################
+
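
The sbatch script above schedules lm-evaluation-harness (lm_eval) on the trained checkpoint once the training job 7543205 completes (--dependency=afterok). A rough Python equivalent of the quoted lm_eval CLI call, assuming lm-evaluation-harness >= 0.4 and abbreviating the long checkpoint path, would be a sketch like:

# Sketch of the same evaluation via the lm_eval Python API (assumed
# lm-evaluation-harness >= 0.4; checkpoint path abbreviated).
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=pythia-70m_xnli_en,dtype=float16,trust_remote_code=True",
    tasks=["xnli_en", "xnli_fr", "sciq", "piqa", "lambada", "arc_easy"],
    device="cuda",
)
print(results["results"])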
logs/events.out.tfevents.1715123789.sphinx2
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fc8dd82c3ec925653e9dd79c9d6a46b7b775b71195b0daa98db4a9d692b629a
+size 95282
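
The three added lines are a Git LFS pointer; the actual TensorBoard event file (about 95 KB) is stored in LFS. Once fetched, it can be read with TensorBoard's event accumulator; a small sketch, assuming the file has been pulled into a local logs/ directory and that the usual Trainer scalar tags are present:

# Sketch: read training scalars from the TensorBoard event file
# (assumes the LFS object has been pulled into a local "logs/" directory).
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("logs")
ea.Reload()
print(ea.Tags()["scalars"])          # e.g. train/loss, train/learning_rate (tag names assumed)
# for event in ea.Scalars("train/loss"):
#     print(event.step, event.value)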
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d7656dba55ad749f46457103a1d9eb71eba4eaea53ba76a6d594e8311d1934b
+size 281715176
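
Like the log file, model.safetensors is committed as a Git LFS pointer; the ~282 MB weights live in LFS storage and are fetched on git lfs pull or via huggingface_hub. A sketch (hypothetical local path) of verifying a downloaded copy against the pointer's oid and size:

# Sketch: verify a downloaded file against the Git LFS pointer above
# (hypothetical local path; oid and size are the values from the pointer).
import hashlib, os

path = "model.safetensors"
expected_oid = "7d7656dba55ad749f46457103a1d9eb71eba4eaea53ba76a6d594e8311d1934b"
expected_size = 281715176

assert os.path.getsize(path) == expected_size
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected_oid
print("model.safetensors matches the LFS pointer")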
train_job_output.txt
ADDED
The diff for this file is too large to render.
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f998d228fc48950699bcacb4814845a216c5ae0a0b93f23b7fb7d35e13041f0c
+size 5048
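
training_args.bin is the pickled TrainingArguments object that the transformers Trainer saves alongside its checkpoints; as with the other binaries, only the LFS pointer is shown here. A hedged sketch of inspecting it once downloaded (unpickling executes code, so only do this for files you trust; the printed attribute names are standard TrainingArguments fields, not values taken from this commit):

# Sketch: inspect the Trainer's saved arguments after pulling training_args.bin.
import torch

args = torch.load("training_args.bin", weights_only=False)  # unpickles a TrainingArguments object
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)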