Defalt-404 committed
Commit 38a656e • Parent(s): 5047984
Update README.md

README.md CHANGED
```diff
@@ -19,19 +19,32 @@ GPT-6B_Tuned_small_pile is a GPT-j-6B model trained on 0.1 million examples of the Pile
 n_embd: 4096, n_layer: 28, n_positions: 2048
 
 Tuning Parameters:
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+val_split_percent: 20,
+
+momentum: 0.9
+
+train_batch_size (eff) : 32
+
+train_micro_batch: 16
+
+gradient_accumulation_steps: 2
+
+gradient_clipping: 0.5
+
+learning_rate: 0.00001
+
+weight_decay: 0.01
+
+lr_schedular: cosine
+
+lr_warmup_steps: 1000
+
+lr_decay: 0.1
+
+lr_decay_step: 2000
+
+mixed_precision: bf16
 
 
 
```
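The added parameter names (micro-batch of 16, 2 accumulation steps, gradient clipping, bf16) describe a standard accumulated-mini-batch fine-tuning setup. As a reading aid, here is a minimal PyTorch sketch of how those values could fit together. The README does not say which framework or optimizer the author actually used, so the optimizer choice (SGD, suggested by `momentum: 0.9`), the warmup-cosine scheduler helper, the sequence length, and the dummy data loader are all assumptions, not the author's training code.

```python
import torch
from transformers import AutoModelForCausalLM, get_cosine_schedule_with_warmup

# Values copied from the tuning parameters above.
MICRO_BATCH = 16   # train_micro_batch
GRAD_ACCUM = 2     # gradient_accumulation_steps -> effective batch of 32
SEQ_LEN = 512      # assumption; n_positions allows up to 2048
TOTAL_STEPS = 100_000 // (MICRO_BATCH * GRAD_ACCUM)  # one pass over 0.1M examples (assumption)

# Base checkpoint; bf16 weights match "mixed_precision: bf16".
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6b", torch_dtype=torch.bfloat16
)
model.train()

# "momentum: 0.9" implies an SGD-family optimizer (assumption).
optimizer = torch.optim.SGD(
    model.parameters(),
    lr=1e-5,            # learning_rate: 0.00001
    momentum=0.9,       # momentum: 0.9
    weight_decay=0.01,  # weight_decay: 0.01
)

# "lr_schedular: cosine" plus "lr_warmup_steps: 1000" map naturally onto a
# warmup-then-cosine schedule. lr_decay: 0.1 / lr_decay_step: 2000 are not
# modeled here; they suggest an additional step decay (assumption).
scheduler = get_cosine_schedule_with_warmup(
    optimizer, num_warmup_steps=1000, num_training_steps=TOTAL_STEPS
)

def micro_batches():
    """Stand-in for a tokenized Pile DataLoader (hypothetical).
    val_split_percent: 20 would hold out 20% of the data for validation."""
    while True:
        ids = torch.randint(0, model.config.vocab_size, (MICRO_BATCH, SEQ_LEN))
        yield {"input_ids": ids, "labels": ids}

for step, batch in zip(range(TOTAL_STEPS * GRAD_ACCUM), micro_batches()):
    loss = model(**batch).loss / GRAD_ACCUM  # average over accumulated micro-batches
    loss.backward()
    if (step + 1) % GRAD_ACCUM == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)  # gradient_clipping: 0.5
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
```

With micro-batches of 16 and 2 accumulation steps, each optimizer update sees 32 examples, which is the effective batch size reported as train_batch_size (eff) above.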