codebyzeb committed
Commit 5e1d6eb
1 Parent(s): 73a5025

Training in progress, step 50000

babyslm/syntactic.txt CHANGED
The diff for this file is too large to render. See raw diff
 
blimp_results.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3c3e5231a5833ea81c3c627178853a917de1900b8646da982207cc3d00067e3
-size 80216800
+oid sha256:786c4ea1c8aee1d056e0da411594cb9fd8f38d1baa2aee83a9f5a8545e8a4d00
+size 80218105
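
Note: the binary files in this commit (blimp_results.json, model.safetensors, training_args.bin) are tracked as Git LFS pointers, so their diffs only swap a SHA-256 and a byte count. A minimal sketch of checking a downloaded blob against such a pointer; `verify_lfs_pointer` is a hypothetical helper, not part of the git-lfs tooling:

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Hypothetical helper: check a downloaded blob against its LFS pointer.

    A pointer file is three "key value" lines; "oid sha256:<hex>" is the
    blob's SHA-256 digest and "size <n>" is its length in bytes.
    """
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    # Hash the blob in 1 MiB chunks so large checkpoints don't load into memory.
    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size
```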
config.json CHANGED
@@ -3,9 +3,9 @@
   "architectures": [
     "GPT2LMHeadModel"
   ],
-  "attn_pdrop": 0.1,
+  "attn_pdrop": 0.3,
   "bos_token_id": 0,
-  "embd_pdrop": 0.1,
+  "embd_pdrop": 0.3,
   "eos_token_id": 0,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
@@ -16,7 +16,7 @@
   "n_layer": 6,
   "n_positions": 256,
   "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.1,
+  "resid_pdrop": 0.3,
   "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
hydra_config_1724933085.355582.yaml ADDED
@@ -0,0 +1,44 @@
+experiment:
+  seed: 42
+  name: gpt2_19M-bpe-text-dyn-03
+  group: babylm-small
+  dry_run: false
+  offline_run: false
+  evaluate_segmentation: false
+  evaluate_babyslm: true
+  blimp_tasks: blimp_filtered,blimp_supplement
+  resume_checkpoint_path: null
+  resume_run_id: null
+dataset:
+  name: transformersegmentation/BabyLM-phonemized
+  subconfig: strict_small
+  text_column: text
+  is_phonemes: false
+  max_age: null
+tokenizer:
+  name: transformersegmentation/BabyLM-BPE-ortho-tokenizer
+data_preprocessing:
+  max_input_length: 128
+  join_utts: dynamic
+  remove_word_boundaries: false
+  subsample: null
+  subsample_type: examples
+model:
+  name: gpt2_lm
+  model_kwargs:
+    n_layer: 6
+    n_head: 8
+    n_embd: 512
+    n_positions: 256
+    n_inner: 2048
+    resid_pdrop: 0.3
+    embd_pdrop: 0.3
+    attn_pdrop: 0.3
+trainer:
+  batch_size: 32
+  lr: 0.001
+  num_warmup_steps: 90000
+  max_training_steps: 400000
+  logging_steps: 4000
+  save_steps: 50000
+  eval_steps: 50000
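
Note: this new hydra config records the full recipe for the run (dropout 0.3, dynamic utterance joining, 400k max steps with checkpoints every 50k, matching this commit's step-50000 snapshot). A minimal sketch of reading it back and instantiating the model from model_kwargs, assuming OmegaConf (the config backend Hydra uses) is installed:

```python
from omegaconf import OmegaConf
from transformers import GPT2Config, GPT2LMHeadModel

cfg = OmegaConf.load("hydra_config_1724933085.355582.yaml")

# model_kwargs maps directly onto GPT2Config keyword arguments; the token ids
# are taken from the config.json diff above, not from this YAML file.
model_cfg = GPT2Config(
    bos_token_id=0,
    eos_token_id=0,
    **OmegaConf.to_container(cfg.model.model_kwargs),
)
model = GPT2LMHeadModel(model_cfg)
```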
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8a1856b83481be0f7a52a705416d3ffb7c8d242e334e4ea6fdab8e61552617d9
+oid sha256:114a339d56e7961c6cae9800c50f675fe0f738602fa7e4cf307c1695c6030aff
 size 108961160
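
Note: the weight blob is replaced at an identical size (108961160 bytes) with a new checksum, as expected when overwriting an in-progress checkpoint. A minimal sketch of loading it; the local-directory `from_pretrained` call assumes the repo has been cloned with LFS blobs resolved:

```python
from safetensors.torch import load_file
from transformers import GPT2LMHeadModel

# Build the model from the accompanying config.json and load the step-50000 weights.
model = GPT2LMHeadModel.from_pretrained(".")

# Or inspect the raw tensors directly without constructing a model:
state_dict = load_file("model.safetensors")
print(sorted(state_dict)[:5])
```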
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5dc80136f7f23f477ebdd661d182228c4de21c404425ed1d103d090bb1541237
+oid sha256:e0b5a8b6f1890201775a9dff957294eb0b004f3daffec4d13e5aebb84bcac43e
 size 5368