codebyzeb committed
Commit 889c086
1 Parent(s): 8e1a1fc

Training in progress, step 25000

babyslm/syntactic.txt CHANGED
The diff for this file is too large to render. See raw diff
 
blimp_results.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2def99d5596a716505ba5ff92e5d3c92b5656f6ea6bc185d219fe26776261522
-size 80215544
+oid sha256:f2147014be6c043e2b8ed41533487d0c39d674a7c078bb4d410bf8b3402612c4
+size 80218658
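
blimp_results.json is stored through Git LFS, so the diff above only touches the pointer file: the oid line holds the SHA-256 of the real blob and the size line its byte count. A minimal sketch for checking a downloaded copy against the new pointer (the local filename is an assumption):

import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file through SHA-256, matching the "oid sha256:..."
    # field that Git LFS writes into its pointer files.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of the blob this pointer now references.
expected = "f2147014be6c043e2b8ed41533487d0c39d674a7c078bb4d410bf8b3402612c4"
assert lfs_sha256("blimp_results.json") == expected
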
config.json CHANGED
@@ -3,9 +3,9 @@
   "architectures": [
     "GPT2LMHeadModel"
   ],
-  "attn_pdrop": 0.1,
+  "attn_pdrop": 0.3,
   "bos_token_id": 0,
-  "embd_pdrop": 0.1,
+  "embd_pdrop": 0.3,
   "eos_token_id": 0,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
@@ -16,7 +16,7 @@
   "n_layer": 6,
   "n_positions": 256,
   "reorder_and_upcast_attn": false,
-  "resid_pdrop": 0.1,
+  "resid_pdrop": 0.3,
   "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
hydra_config_1725017275.3935974.yaml ADDED
@@ -0,0 +1,44 @@
+experiment:
+  seed: 42
+  name: gpt2_5M-bpe-text-dyn-03
+  group: babylm-small
+  dry_run: false
+  offline_run: false
+  evaluate_segmentation: false
+  evaluate_babyslm: true
+  blimp_tasks: blimp_filtered,blimp_supplement
+  resume_checkpoint_path: null
+  resume_run_id: null
+dataset:
+  name: transformersegmentation/BabyLM-phonemized
+  subconfig: strict_small
+  text_column: text
+  is_phonemes: false
+  max_age: null
+tokenizer:
+  name: transformersegmentation/BabyLM-BPE-ortho-tokenizer
+data_preprocessing:
+  max_input_length: 128
+  join_utts: dynamic
+  remove_word_boundaries: false
+  subsample: null
+  subsample_type: examples
+model:
+  name: gpt2_lm
+  model_kwargs:
+    n_layer: 6
+    n_head: 8
+    n_embd: 256
+    n_positions: 256
+    n_inner: 1024
+    resid_pdrop: 0.3
+    embd_pdrop: 0.3
+    attn_pdrop: 0.3
+trainer:
+  batch_size: 32
+  lr: 0.001
+  num_warmup_steps: 100000
+  max_training_steps: 600000
+  logging_steps: 6000
+  save_steps: 25000
+  eval_steps: 25000
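
The new YAML is a snapshot of the Hydra run configuration, and its model.model_kwargs block matches the config.json change above. A sketch of reading it with OmegaConf (the library Hydra builds on) and turning model_kwargs into a GPT-2 config; the filename is taken from the commit, and the one-to-one mapping onto GPT2Config arguments is an assumption:

from omegaconf import OmegaConf
from transformers import GPT2Config, GPT2LMHeadModel

cfg = OmegaConf.load("hydra_config_1725017275.3935974.yaml")

# model_kwargs maps directly onto GPT2Config keyword arguments.
config = GPT2Config(**OmegaConf.to_container(cfg.model.model_kwargs))
model = GPT2LMHeadModel(config)

print(cfg.trainer.lr)                  # 0.001
print(cfg.trainer.max_training_steps)  # 600000
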
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:65a12d689bb9f43ee19c23d015355a4a4b2512d49ad3761b63f9c9fe5a595c14
+oid sha256:af76708dfc52079dd81be19bbde53a58516495d3b59f93b8dac1c20108155025
 size 35609928
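
model.safetensors gets a new oid but keeps its exact size, which is what an in-place weight update of a fixed architecture looks like. A sketch for listing the checkpoint's tensors without building the model, using the safetensors library (the local path is an assumption):

from safetensors import safe_open

# Open lazily; tensors are only materialized on get_tensor().
with safe_open("model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, tuple(f.get_tensor(name).shape))
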
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fc05a9b18c09d9343d851f614a906fd152098177d5c8db3b58ee8a0f0ce54453
+oid sha256:c9137eae5b2fc0ccaecfcc834c539253a235e58790318a23a66c11247c57014a
 size 5368
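
training_args.bin is typically the pickled training-arguments object that the transformers Trainer writes alongside checkpoints; here the hash changes while the size stays at 5368 bytes, consistent with a field value changing rather than the structure. A sketch for inspecting it (it is an arbitrary pickle, so weights_only must be disabled and the file should come from a trusted source):

import torch

# weights_only=False is required because this is a pickled Python
# object, not a tensor archive; only do this for files you trust.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # e.g. TrainingArguments
print(args)                 # full dump of the training hyperparameters
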