Training in progress, step 100000
Files changed:
- babyslm/syntactic.txt +0 -0
- blimp_results.json +2 -2
- config.json +4 -4
- model.safetensors +2 -2
- training_args.bin +1 -1
babyslm/syntactic.txt
CHANGED
The diff for this file is too large to render.
See raw diff
blimp_results.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:155e95079e6843f30888c6a5282ee97f22589ed8f19e8b40084fd0a43b07684c
+size 80216132
config.json
CHANGED
@@ -10,10 +10,10 @@
 "initializer_range": 0.02,
 "layer_norm_epsilon": 1e-05,
 "model_type": "gpt2",
-"n_embd":
-"n_head":
-"n_inner":
-"n_layer":
+"n_embd": 512,
+"n_head": 8,
+"n_inner": 2048,
+"n_layer": 8,
 "n_positions": 256,
 "reorder_and_upcast_attn": false,
 "resid_pdrop": 0.1,
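For reference, the updated hyperparameters describe a small GPT-2 variant: 8 layers, 8 attention heads, a 512-dimensional hidden state, a 2048-dimensional feed-forward inner layer, and a 256-token context window. Below is a minimal sketch of rebuilding this architecture with the Hugging Face transformers library; the vocabulary size does not appear in this diff, so the GPT-2 default is kept and may not match the checkpoint's tokenizer.

```python
# Minimal sketch: instantiate the architecture described by the updated config.json.
# Assumes the Hugging Face `transformers` library. vocab_size is NOT in this diff and
# is left at the GPT-2 default, so it may differ from the actual checkpoint.
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(
    n_embd=512,        # hidden size (updated in this commit)
    n_head=8,          # attention heads (updated in this commit)
    n_inner=2048,      # feed-forward inner dimension (updated in this commit)
    n_layer=8,         # transformer blocks (updated in this commit)
    n_positions=256,   # context length (unchanged)
    layer_norm_epsilon=1e-05,
    initializer_range=0.02,
    resid_pdrop=0.1,
)

model = GPT2LMHeadModel(config)
print(f"{model.num_parameters():,} parameters")
```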
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:45b8ee841a5ba6565c3b7951bc2ea21d223f2de7b42897642a701e420133a5cc
+size 134182632
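The model.safetensors pointer now references a roughly 134 MB weight file for the step-100000 checkpoint. A hedged sketch of loading it from the Hub is shown below; the repository id and revision are hypothetical placeholders, since neither appears in this diff.

```python
# Sketch only: load the step-100000 checkpoint from the Hub.
# "user/model-repo" and `revision` are HYPOTHETICAL placeholders; substitute the
# actual repository id and, ideally, the hash of this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "user/model-repo"   # placeholder, not given in the diff
revision = "main"             # or the commit hash corresponding to step 100000

model = AutoModelForCausalLM.from_pretrained(repo_id, revision=revision)
tokenizer = AutoTokenizer.from_pretrained(repo_id, revision=revision)
```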
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4bdb9bb7a47c8d79dae8b6e300ed84f1e99318dafb96dc9dbd2c7137c0067d4e
 size 5368
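training_args.bin is presumably the Hugging Face Trainer's serialized TrainingArguments (a torch-pickled object); only its LFS hash changes here, the size stays at 5368 bytes. A sketch of inspecting it locally, assuming a recent PyTorch where weights_only must be disabled for pickled objects:

```python
# Sketch: inspect the serialized training arguments after downloading training_args.bin.
# weights_only=False is needed on recent PyTorch because the file is a pickled object,
# not a plain tensor checkpoint. Only unpickle files from sources you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)
```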