loubnabnl committed
Commit d7feb6c
1 Parent(s): 7e4bfbc

Model save
README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ license: apache-2.0
+ base_model: HuggingFaceTB/SmolLM-135M
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: smollm-135M-instruct-v2
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/loubnabnl/huggingface/runs/qlti0xh5)
+ # smollm-135M-instruct-v2
+
+ This model is a fine-tuned version of [HuggingFaceTB/SmolLM-135M](https://huggingface.co/HuggingFaceTB/SmolLM-135M) on the generator dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.4183
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.001
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 1.0534 | 1.0 | 819 | 1.4183 |
+
+
+ ### Framework versions
+
+ - Transformers 4.42.3
+ - Pytorch 2.1.2
+ - Datasets 2.20.0
+ - Tokenizers 0.19.1
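The generated card stops short of a usage example. Below is a minimal sketch of loading this checkpoint with its ChatML template through the standard transformers API; the repo id is a placeholder, not something the committed files state — point it at the actual Hub path of this model.

```python
# Minimal usage sketch (not part of the committed files). The repo id is a
# hypothetical placeholder -- replace it with the checkpoint's actual Hub path.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "smollm-135M-instruct-v2"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

# The tokenizer ships a ChatML chat template (see tokenizer_config.json below),
# so apply_chat_template builds the <|im_start|>/<|im_end|> prompt for us.
messages = [{"role": "user", "content": "What is gravity?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```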
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.0,
+ "total_flos": 45247522406400.0,
+ "train_loss": 1.1312509008088536,
+ "train_runtime": 1838.4003,
+ "train_samples": 321338,
+ "train_samples_per_second": 57.016,
+ "train_steps_per_second": 0.445
+ }
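These throughput numbers are internally consistent with the 819 optimizer steps recorded in trainer_state.json further down and the effective batch size of 128 from the README; "samples" here appears to count packed sequences rather than the raw examples in train_samples, which is an inference, not something the files state. A quick arithmetic check:

```python
# Cross-check the reported throughput against the 819 optimizer steps and the
# effective batch size of 128 from the README (4 per device x 8 GPUs x 4 accumulation).
train_runtime = 1838.4003        # seconds
steps_per_second = 0.445
samples_per_second = 57.016      # likely packed sequences, not raw examples

print(train_runtime * steps_per_second)    # ~818.1  -> matches global_step = 819
print(train_runtime * samples_per_second)  # ~104818 -> ~= 819 steps * 128 sequences
```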
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "_name_or_path": "HuggingFaceTB/SmolLM-135M",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 576,
+ "initializer_range": 0.02,
+ "intermediate_size": 1536,
+ "max_position_embeddings": 2048,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 9,
+ "num_hidden_layers": 30,
+ "num_key_value_heads": 3,
+ "pad_token_id": 2,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.42.3",
+ "use_cache": false,
+ "vocab_size": 49152
+ }
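This config describes a 30-layer Llama-style network with grouped-query attention (9 query heads sharing 3 key/value heads). The attention geometry follows directly from the values above:

```python
# Attention geometry implied by config.json.
hidden_size = 576
num_attention_heads = 9
num_key_value_heads = 3

head_dim = hidden_size // num_attention_heads            # 64
group_size = num_attention_heads // num_key_value_heads  # 3 query heads per KV head (GQA)
print(head_dim, group_size)
```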
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "pad_token_id": 2,
+ "transformers_version": "4.42.3"
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4522299d4137ae4e43ec0d3741ccf80650c4e56dc7118717ef5462ea09a6567
+ size 269060552
runs/Aug26_07-23-56_ip-26-0-167-245/events.out.tfevents.1724657633.ip-26-0-167-245.57662.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0120ace273ef1090ba754214041ab142804e199c64d68548c10b9e65710da7bd
+ size 40178
special_tokens_map.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "additional_special_tokens": [
+ {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ ],
+ "bos_token": "<|im_start|>",
+ "eos_token": "<|im_end|>",
+ "pad_token": "<|im_end|>",
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,154 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<repo_name>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "<reponame>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "5": {
+ "content": "<file_sep>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "6": {
+ "content": "<filename>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "7": {
+ "content": "<gh_stars>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "8": {
+ "content": "<issue_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "9": {
+ "content": "<issue_comment>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "10": {
+ "content": "<issue_closed>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "11": {
+ "content": "<jupyter_start>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "12": {
+ "content": "<jupyter_text>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "13": {
+ "content": "<jupyter_code>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "14": {
+ "content": "<jupyter_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "15": {
+ "content": "<jupyter_script>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "16": {
+ "content": "<empty_output>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": "<|im_start|>",
+ "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "model_max_length": 2048,
+ "pad_token": "<|im_end|>",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>",
+ "vocab_size": 49152
+ }
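The chat_template above is a Jinja template that wraps each turn in ChatML markers. A small sketch rendering it standalone with jinja2 (transformers uses jinja2 for apply_chat_template, so this mirrors what the tokenizer produces without needing the checkpoint):

```python
# Render the committed chat_template to see the exact prompt format.
from jinja2 import Template

chat_template = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
]
print(Template(chat_template).render(messages=messages, add_generation_prompt=True))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hi!<|im_end|>
# <|im_start|>assistant
```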
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.0,
+ "total_flos": 45247522406400.0,
+ "train_loss": 1.1312509008088536,
+ "train_runtime": 1838.4003,
+ "train_samples": 321338,
+ "train_samples_per_second": 57.016,
+ "train_steps_per_second": 0.445
+ }
trainer_state.json ADDED
@@ -0,0 +1,1198 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 819,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001221001221001221,
+ "grad_norm": 1.0205980343161538,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 1.6063,
+ "step": 1
+ },
+ {
+ "epoch": 0.006105006105006105,
+ "grad_norm": 0.89369227471272,
+ "learning_rate": 6.097560975609756e-05,
+ "loss": 1.6127,
+ "step": 5
+ },
+ {
+ "epoch": 0.01221001221001221,
+ "grad_norm": 0.773993509246519,
+ "learning_rate": 0.00012195121951219512,
+ "loss": 1.5596,
+ "step": 10
+ },
+ {
+ "epoch": 0.018315018315018316,
+ "grad_norm": 0.6209997076659476,
+ "learning_rate": 0.00018292682926829268,
+ "loss": 1.4619,
+ "step": 15
+ },
+ {
+ "epoch": 0.02442002442002442,
+ "grad_norm": 0.4770932033448127,
+ "learning_rate": 0.00024390243902439024,
+ "loss": 1.4028,
+ "step": 20
+ },
+ {
+ "epoch": 0.030525030525030524,
+ "grad_norm": 0.3320501554134054,
+ "learning_rate": 0.0003048780487804878,
+ "loss": 1.347,
+ "step": 25
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 0.2002596607926003,
+ "learning_rate": 0.00036585365853658537,
+ "loss": 1.3103,
+ "step": 30
+ },
+ {
+ "epoch": 0.042735042735042736,
+ "grad_norm": 0.20128176628182073,
+ "learning_rate": 0.0004268292682926829,
+ "loss": 1.3026,
+ "step": 35
+ },
+ {
+ "epoch": 0.04884004884004884,
+ "grad_norm": 0.17594648344906566,
+ "learning_rate": 0.0004878048780487805,
+ "loss": 1.2854,
+ "step": 40
+ },
+ {
+ "epoch": 0.054945054945054944,
+ "grad_norm": 0.17236683696702648,
+ "learning_rate": 0.0005487804878048781,
+ "loss": 1.271,
+ "step": 45
+ },
+ {
+ "epoch": 0.06105006105006105,
+ "grad_norm": 0.17916116023910555,
+ "learning_rate": 0.0006097560975609756,
+ "loss": 1.2661,
+ "step": 50
+ },
+ {
+ "epoch": 0.06715506715506715,
+ "grad_norm": 0.15617178601932155,
+ "learning_rate": 0.0006707317073170732,
+ "loss": 1.2528,
+ "step": 55
+ },
+ {
+ "epoch": 0.07326007326007326,
+ "grad_norm": 0.1927465066785224,
+ "learning_rate": 0.0007317073170731707,
+ "loss": 1.252,
+ "step": 60
+ },
+ {
+ "epoch": 0.07936507936507936,
+ "grad_norm": 0.20776151355108807,
+ "learning_rate": 0.0007926829268292683,
+ "loss": 1.2463,
+ "step": 65
+ },
+ {
+ "epoch": 0.08547008547008547,
+ "grad_norm": 0.1655472216601126,
+ "learning_rate": 0.0008536585365853659,
+ "loss": 1.239,
+ "step": 70
+ },
+ {
+ "epoch": 0.09157509157509157,
+ "grad_norm": 0.20874533177899088,
+ "learning_rate": 0.0009146341463414635,
+ "loss": 1.2417,
+ "step": 75
+ },
+ {
+ "epoch": 0.09768009768009768,
+ "grad_norm": 0.21100479425784993,
+ "learning_rate": 0.000975609756097561,
+ "loss": 1.2328,
+ "step": 80
+ },
+ {
+ "epoch": 0.10378510378510379,
+ "grad_norm": 0.20904541266897642,
+ "learning_rate": 0.000999959117130623,
+ "loss": 1.2336,
+ "step": 85
+ },
+ {
+ "epoch": 0.10989010989010989,
+ "grad_norm": 0.20019701905382167,
+ "learning_rate": 0.000999709301584265,
+ "loss": 1.2252,
+ "step": 90
+ },
+ {
+ "epoch": 0.115995115995116,
+ "grad_norm": 0.21413001652476213,
+ "learning_rate": 0.0009992324965361792,
+ "loss": 1.2243,
+ "step": 95
+ },
+ {
+ "epoch": 0.1221001221001221,
+ "grad_norm": 0.16522854367968515,
+ "learning_rate": 0.0009985289185717684,
+ "loss": 1.2121,
+ "step": 100
+ },
+ {
+ "epoch": 0.1282051282051282,
+ "grad_norm": 0.25042890936419604,
+ "learning_rate": 0.000997598887286467,
+ "loss": 1.2153,
+ "step": 105
+ },
+ {
+ "epoch": 0.1343101343101343,
+ "grad_norm": 0.2092348207313529,
+ "learning_rate": 0.000996442825140569,
+ "loss": 1.2067,
+ "step": 110
+ },
+ {
+ "epoch": 0.14041514041514042,
+ "grad_norm": 0.1987412303455424,
+ "learning_rate": 0.0009950612572673255,
+ "loss": 1.211,
+ "step": 115
+ },
+ {
+ "epoch": 0.14652014652014653,
+ "grad_norm": 0.19695075669196946,
+ "learning_rate": 0.0009934548112344088,
+ "loss": 1.2139,
+ "step": 120
+ },
+ {
+ "epoch": 0.15262515262515264,
+ "grad_norm": 0.1432709740921402,
+ "learning_rate": 0.0009916242167588433,
+ "loss": 1.2147,
+ "step": 125
+ },
+ {
+ "epoch": 0.15873015873015872,
+ "grad_norm": 0.16002380073043662,
+ "learning_rate": 0.0009895703053755364,
+ "loss": 1.2028,
+ "step": 130
+ },
+ {
+ "epoch": 0.16483516483516483,
+ "grad_norm": 0.15340967683760462,
+ "learning_rate": 0.0009872940100595598,
+ "loss": 1.1995,
+ "step": 135
+ },
+ {
+ "epoch": 0.17094017094017094,
+ "grad_norm": 0.18565794761972862,
+ "learning_rate": 0.0009847963648023522,
+ "loss": 1.1992,
+ "step": 140
+ },
+ {
+ "epoch": 0.17704517704517705,
+ "grad_norm": 0.20113198830929513,
+ "learning_rate": 0.000982078504142035,
+ "loss": 1.1846,
+ "step": 145
+ },
+ {
+ "epoch": 0.18315018315018314,
+ "grad_norm": 0.239052195300538,
+ "learning_rate": 0.000979141662648057,
+ "loss": 1.1917,
+ "step": 150
+ },
+ {
+ "epoch": 0.18925518925518925,
+ "grad_norm": 0.18541851296547388,
+ "learning_rate": 0.0009759871743604004,
+ "loss": 1.1883,
+ "step": 155
+ },
+ {
+ "epoch": 0.19536019536019536,
+ "grad_norm": 0.1976795721725536,
+ "learning_rate": 0.0009726164721835996,
+ "loss": 1.1844,
+ "step": 160
+ },
+ {
+ "epoch": 0.20146520146520147,
+ "grad_norm": 0.17463002143028833,
+ "learning_rate": 0.0009690310872358572,
+ "loss": 1.1969,
+ "step": 165
+ },
+ {
+ "epoch": 0.20757020757020758,
+ "grad_norm": 0.21323309987103073,
+ "learning_rate": 0.0009652326481535434,
+ "loss": 1.188,
+ "step": 170
+ },
+ {
+ "epoch": 0.21367521367521367,
+ "grad_norm": 0.23172909898163094,
+ "learning_rate": 0.0009612228803513976,
+ "loss": 1.186,
+ "step": 175
+ },
+ {
+ "epoch": 0.21978021978021978,
+ "grad_norm": 0.22579230538853118,
+ "learning_rate": 0.0009570036052387725,
+ "loss": 1.1798,
+ "step": 180
+ },
+ {
+ "epoch": 0.2258852258852259,
+ "grad_norm": 0.1619341093580773,
+ "learning_rate": 0.0009525767393922706,
+ "loss": 1.1662,
+ "step": 185
+ },
+ {
+ "epoch": 0.231990231990232,
+ "grad_norm": 0.183207674619275,
+ "learning_rate": 0.0009479442936851526,
+ "loss": 1.1723,
+ "step": 190
+ },
+ {
+ "epoch": 0.23809523809523808,
+ "grad_norm": 0.21188703922152927,
+ "learning_rate": 0.0009431083723739124,
+ "loss": 1.1576,
+ "step": 195
+ },
+ {
+ "epoch": 0.2442002442002442,
+ "grad_norm": 0.2089350434131967,
+ "learning_rate": 0.0009380711721424326,
+ "loss": 1.1619,
+ "step": 200
+ },
+ {
+ "epoch": 0.2503052503052503,
+ "grad_norm": 0.15157070914539794,
+ "learning_rate": 0.0009328349811041565,
+ "loss": 1.1631,
+ "step": 205
+ },
+ {
+ "epoch": 0.2564102564102564,
+ "grad_norm": 0.16351230424545868,
+ "learning_rate": 0.0009274021777627277,
+ "loss": 1.1709,
+ "step": 210
+ },
+ {
+ "epoch": 0.2625152625152625,
+ "grad_norm": 0.17564187179508275,
+ "learning_rate": 0.0009217752299315725,
+ "loss": 1.1462,
+ "step": 215
+ },
+ {
+ "epoch": 0.2686202686202686,
+ "grad_norm": 0.2131938226227643,
+ "learning_rate": 0.0009159566936129111,
+ "loss": 1.1723,
+ "step": 220
+ },
+ {
+ "epoch": 0.27472527472527475,
+ "grad_norm": 0.20887852013049316,
+ "learning_rate": 0.0009099492118367123,
+ "loss": 1.1689,
+ "step": 225
+ },
+ {
+ "epoch": 0.28083028083028083,
+ "grad_norm": 0.17857721915396702,
+ "learning_rate": 0.0009037555134601149,
+ "loss": 1.1466,
+ "step": 230
+ },
+ {
+ "epoch": 0.2869352869352869,
+ "grad_norm": 0.1620613655817899,
+ "learning_rate": 0.000897378411927864,
+ "loss": 1.1592,
+ "step": 235
+ },
+ {
+ "epoch": 0.29304029304029305,
+ "grad_norm": 0.2125809185558365,
+ "learning_rate": 0.0008908208039943213,
+ "loss": 1.1485,
+ "step": 240
+ },
+ {
+ "epoch": 0.29914529914529914,
+ "grad_norm": 0.1748347227235315,
+ "learning_rate": 0.0008840856684076366,
+ "loss": 1.1332,
+ "step": 245
+ },
+ {
+ "epoch": 0.3052503052503053,
+ "grad_norm": 0.22239189840818827,
+ "learning_rate": 0.0008771760645566706,
+ "loss": 1.141,
+ "step": 250
+ },
+ {
+ "epoch": 0.31135531135531136,
+ "grad_norm": 3.2813461087169853,
+ "learning_rate": 0.000870095131081289,
+ "loss": 1.1641,
+ "step": 255
+ },
+ {
+ "epoch": 0.31746031746031744,
+ "grad_norm": 0.2268012798675002,
+ "learning_rate": 0.0008628460844466573,
+ "loss": 1.1401,
+ "step": 260
+ },
+ {
+ "epoch": 0.3235653235653236,
+ "grad_norm": 0.19745728202118107,
+ "learning_rate": 0.0008554322174821833,
+ "loss": 1.1401,
+ "step": 265
+ },
+ {
+ "epoch": 0.32967032967032966,
+ "grad_norm": 0.1818589584492454,
+ "learning_rate": 0.0008478568978857722,
+ "loss": 1.1412,
+ "step": 270
+ },
+ {
+ "epoch": 0.33577533577533575,
+ "grad_norm": 0.17803807739093683,
+ "learning_rate": 0.0008401235666940728,
+ "loss": 1.1427,
+ "step": 275
+ },
+ {
+ "epoch": 0.3418803418803419,
+ "grad_norm": 0.1635966967881225,
+ "learning_rate": 0.0008322357367194109,
+ "loss": 1.1348,
+ "step": 280
+ },
+ {
+ "epoch": 0.34798534798534797,
+ "grad_norm": 0.16634307483679497,
+ "learning_rate": 0.0008241969909541184,
+ "loss": 1.1291,
+ "step": 285
+ },
+ {
+ "epoch": 0.3540903540903541,
+ "grad_norm": 0.22380463687576627,
+ "learning_rate": 0.0008160109809429835,
+ "loss": 1.1375,
+ "step": 290
+ },
+ {
+ "epoch": 0.3601953601953602,
+ "grad_norm": 0.17014516986944303,
+ "learning_rate": 0.0008076814251245613,
+ "loss": 1.1315,
+ "step": 295
+ },
+ {
+ "epoch": 0.3663003663003663,
+ "grad_norm": 0.19479534952981664,
+ "learning_rate": 0.0007992121071421001,
+ "loss": 1.1478,
+ "step": 300
+ },
+ {
+ "epoch": 0.3724053724053724,
+ "grad_norm": 0.1880458408856176,
+ "learning_rate": 0.0007906068741248461,
+ "loss": 1.1281,
+ "step": 305
+ },
+ {
+ "epoch": 0.3785103785103785,
+ "grad_norm": 0.15460815741303807,
+ "learning_rate": 0.0007818696349405123,
+ "loss": 1.1362,
+ "step": 310
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 0.15045386348296025,
+ "learning_rate": 0.0007730043584197021,
+ "loss": 1.1259,
+ "step": 315
+ },
+ {
+ "epoch": 0.3907203907203907,
+ "grad_norm": 0.20122942405887176,
+ "learning_rate": 0.0007640150715530953,
+ "loss": 1.1181,
+ "step": 320
+ },
+ {
+ "epoch": 0.3968253968253968,
+ "grad_norm": 0.20533248267550017,
+ "learning_rate": 0.0007549058576622157,
+ "loss": 1.1293,
+ "step": 325
+ },
+ {
+ "epoch": 0.40293040293040294,
+ "grad_norm": 0.13677076752035072,
+ "learning_rate": 0.0007456808545446102,
+ "loss": 1.1221,
+ "step": 330
+ },
+ {
+ "epoch": 0.409035409035409,
+ "grad_norm": 0.1734062852611153,
+ "learning_rate": 0.0007363442525942826,
+ "loss": 1.118,
+ "step": 335
+ },
+ {
+ "epoch": 0.41514041514041516,
+ "grad_norm": 0.16356872758418958,
+ "learning_rate": 0.0007269002928982366,
+ "loss": 1.1231,
+ "step": 340
+ },
+ {
+ "epoch": 0.42124542124542125,
+ "grad_norm": 0.17309361004903426,
+ "learning_rate": 0.0007173532653099911,
+ "loss": 1.1146,
+ "step": 345
+ },
+ {
+ "epoch": 0.42735042735042733,
+ "grad_norm": 0.1326003328898491,
+ "learning_rate": 0.0007077075065009433,
+ "loss": 1.136,
+ "step": 350
+ },
+ {
+ "epoch": 0.43345543345543347,
+ "grad_norm": 0.1811730240238145,
+ "learning_rate": 0.0006979673979904665,
+ "loss": 1.1276,
+ "step": 355
+ },
+ {
+ "epoch": 0.43956043956043955,
+ "grad_norm": 0.13629851189524775,
+ "learning_rate": 0.0006881373641556346,
+ "loss": 1.1113,
+ "step": 360
+ },
+ {
+ "epoch": 0.4456654456654457,
+ "grad_norm": 0.21374115697770382,
+ "learning_rate": 0.0006782218702214797,
+ "loss": 1.1078,
+ "step": 365
+ },
+ {
+ "epoch": 0.4517704517704518,
+ "grad_norm": 0.16886558971737456,
+ "learning_rate": 0.000668225420232694,
+ "loss": 1.1108,
+ "step": 370
+ },
+ {
+ "epoch": 0.45787545787545786,
+ "grad_norm": 0.16694922624121042,
+ "learning_rate": 0.0006581525550076989,
+ "loss": 1.1068,
+ "step": 375
+ },
+ {
+ "epoch": 0.463980463980464,
+ "grad_norm": 0.16014858561217288,
+ "learning_rate": 0.0006480078500760096,
+ "loss": 1.1185,
+ "step": 380
+ },
+ {
+ "epoch": 0.4700854700854701,
+ "grad_norm": 0.18799411654589882,
+ "learning_rate": 0.0006377959135998322,
+ "loss": 1.1143,
+ "step": 385
+ },
+ {
+ "epoch": 0.47619047619047616,
+ "grad_norm": 0.1270424719263773,
+ "learning_rate": 0.0006275213842808383,
+ "loss": 1.1055,
+ "step": 390
+ },
+ {
+ "epoch": 0.4822954822954823,
+ "grad_norm": 0.17394420991284418,
+ "learning_rate": 0.0006171889292530655,
+ "loss": 1.105,
+ "step": 395
+ },
+ {
+ "epoch": 0.4884004884004884,
+ "grad_norm": 0.15074553592858597,
+ "learning_rate": 0.0006068032419629059,
+ "loss": 1.1167,
+ "step": 400
+ },
+ {
+ "epoch": 0.4945054945054945,
+ "grad_norm": 0.1575327685510419,
+ "learning_rate": 0.0005963690400371386,
+ "loss": 1.105,
+ "step": 405
+ },
+ {
+ "epoch": 0.5006105006105006,
+ "grad_norm": 0.15495173159548875,
+ "learning_rate": 0.0005858910631399817,
+ "loss": 1.1079,
+ "step": 410
+ },
+ {
+ "epoch": 0.5067155067155067,
+ "grad_norm": 0.12495502826181124,
+ "learning_rate": 0.0005753740708201315,
+ "loss": 1.0961,
+ "step": 415
+ },
+ {
+ "epoch": 0.5128205128205128,
+ "grad_norm": 0.17798304801391704,
+ "learning_rate": 0.0005648228403487712,
+ "loss": 1.103,
+ "step": 420
+ },
+ {
+ "epoch": 0.518925518925519,
+ "grad_norm": 0.15145475180954737,
+ "learning_rate": 0.0005542421645495279,
+ "loss": 1.1082,
+ "step": 425
+ },
+ {
+ "epoch": 0.525030525030525,
+ "grad_norm": 0.1674135601603621,
+ "learning_rate": 0.0005436368496213656,
+ "loss": 1.0922,
+ "step": 430
+ },
+ {
+ "epoch": 0.5311355311355311,
+ "grad_norm": 0.1322258266693411,
+ "learning_rate": 0.0005330117129554028,
+ "loss": 1.0969,
+ "step": 435
+ },
+ {
+ "epoch": 0.5372405372405372,
+ "grad_norm": 0.16392404937146968,
+ "learning_rate": 0.0005223715809466454,
+ "loss": 1.1023,
+ "step": 440
+ },
+ {
+ "epoch": 0.5433455433455433,
+ "grad_norm": 0.15868256924573232,
+ "learning_rate": 0.0005117212868016303,
+ "loss": 1.0961,
+ "step": 445
+ },
+ {
+ "epoch": 0.5494505494505495,
+ "grad_norm": 0.1493324553731281,
+ "learning_rate": 0.0005010656683429746,
+ "loss": 1.0992,
+ "step": 450
+ },
+ {
+ "epoch": 0.5555555555555556,
+ "grad_norm": 0.13303968473914282,
+ "learning_rate": 0.0004904095658118283,
+ "loss": 1.0869,
+ "step": 455
+ },
+ {
+ "epoch": 0.5616605616605617,
+ "grad_norm": 0.13445179172013122,
+ "learning_rate": 0.0004797578196692281,
+ "loss": 1.0979,
+ "step": 460
+ },
+ {
+ "epoch": 0.5677655677655677,
+ "grad_norm": 0.1353400408710943,
+ "learning_rate": 0.00046911526839735093,
+ "loss": 1.1058,
+ "step": 465
+ },
+ {
+ "epoch": 0.5738705738705738,
+ "grad_norm": 0.15268434606287648,
+ "learning_rate": 0.0004584867463016671,
+ "loss": 1.0955,
+ "step": 470
+ },
+ {
+ "epoch": 0.57997557997558,
+ "grad_norm": 0.14945079736642003,
+ "learning_rate": 0.00044787708131499104,
+ "loss": 1.0829,
+ "step": 475
+ },
+ {
+ "epoch": 0.5860805860805861,
+ "grad_norm": 0.1308874678872484,
+ "learning_rate": 0.0004372910928044249,
+ "loss": 1.0899,
+ "step": 480
+ },
+ {
+ "epoch": 0.5921855921855922,
+ "grad_norm": 0.1315948608079688,
+ "learning_rate": 0.00042673358938219544,
+ "loss": 1.0864,
+ "step": 485
+ },
+ {
+ "epoch": 0.5982905982905983,
+ "grad_norm": 0.19438574034423878,
+ "learning_rate": 0.00041620936672137393,
+ "loss": 1.093,
+ "step": 490
+ },
+ {
+ "epoch": 0.6043956043956044,
+ "grad_norm": 0.12572375675032343,
+ "learning_rate": 0.00040572320537747656,
+ "loss": 1.0875,
+ "step": 495
+ },
+ {
+ "epoch": 0.6105006105006106,
+ "grad_norm": 0.12787583668085029,
+ "learning_rate": 0.0003952798686169279,
+ "loss": 1.1024,
+ "step": 500
+ },
+ {
+ "epoch": 0.6166056166056166,
+ "grad_norm": 0.14209081780825783,
+ "learning_rate": 0.00038488410025338133,
+ "loss": 1.0938,
+ "step": 505
+ },
+ {
+ "epoch": 0.6227106227106227,
+ "grad_norm": 0.13751458022764315,
+ "learning_rate": 0.00037454062249287477,
+ "loss": 1.0878,
+ "step": 510
+ },
+ {
+ "epoch": 0.6288156288156288,
+ "grad_norm": 0.13954841564889886,
+ "learning_rate": 0.0003642541337887999,
+ "loss": 1.0912,
+ "step": 515
+ },
+ {
+ "epoch": 0.6349206349206349,
+ "grad_norm": 0.14997321061776903,
+ "learning_rate": 0.00035402930670766296,
+ "loss": 1.0747,
+ "step": 520
+ },
+ {
+ "epoch": 0.6410256410256411,
+ "grad_norm": 0.1701293287916612,
+ "learning_rate": 0.00034387078580660346,
+ "loss": 1.0783,
+ "step": 525
+ },
+ {
+ "epoch": 0.6471306471306472,
+ "grad_norm": 0.12199767794093884,
+ "learning_rate": 0.00033378318552363664,
+ "loss": 1.0776,
+ "step": 530
+ },
+ {
+ "epoch": 0.6532356532356532,
+ "grad_norm": 0.16919824019980462,
+ "learning_rate": 0.0003237710880815756,
+ "loss": 1.0889,
+ "step": 535
+ },
+ {
+ "epoch": 0.6593406593406593,
+ "grad_norm": 0.13811611845852387,
+ "learning_rate": 0.00031383904140658986,
+ "loss": 1.0762,
+ "step": 540
+ },
+ {
+ "epoch": 0.6654456654456654,
+ "grad_norm": 0.15905169364412688,
+ "learning_rate": 0.0003039915570623396,
+ "loss": 1.082,
+ "step": 545
+ },
+ {
+ "epoch": 0.6715506715506715,
+ "grad_norm": 0.1296226609467321,
+ "learning_rate": 0.0002942331082006308,
+ "loss": 1.0724,
+ "step": 550
+ },
+ {
+ "epoch": 0.6776556776556777,
+ "grad_norm": 0.1330698219795046,
+ "learning_rate": 0.00028456812752951485,
+ "loss": 1.0723,
+ "step": 555
+ },
+ {
+ "epoch": 0.6837606837606838,
+ "grad_norm": 0.13173063625642664,
+ "learning_rate": 0.0002750010052997635,
+ "loss": 1.0783,
+ "step": 560
+ },
+ {
+ "epoch": 0.6898656898656899,
+ "grad_norm": 0.11827576804873147,
+ "learning_rate": 0.00026553608731062604,
+ "loss": 1.0734,
+ "step": 565
+ },
+ {
+ "epoch": 0.6959706959706959,
+ "grad_norm": 0.12044643950959122,
+ "learning_rate": 0.00025617767293578176,
+ "loss": 1.0695,
+ "step": 570
+ },
+ {
+ "epoch": 0.702075702075702,
+ "grad_norm": 0.12921730309691146,
+ "learning_rate": 0.0002469300131703773,
+ "loss": 1.0721,
+ "step": 575
+ },
+ {
+ "epoch": 0.7081807081807082,
+ "grad_norm": 0.12533218466426468,
+ "learning_rate": 0.00023779730870004235,
+ "loss": 1.0608,
+ "step": 580
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 0.12817655243582374,
+ "learning_rate": 0.00022878370799275777,
+ "loss": 1.0789,
+ "step": 585
+ },
+ {
+ "epoch": 0.7203907203907204,
+ "grad_norm": 0.1293778348063508,
+ "learning_rate": 0.0002198933054144414,
+ "loss": 1.0602,
+ "step": 590
+ },
+ {
+ "epoch": 0.7264957264957265,
+ "grad_norm": 0.1263148140338118,
+ "learning_rate": 0.00021113013936911113,
+ "loss": 1.0679,
+ "step": 595
+ },
+ {
+ "epoch": 0.7326007326007326,
+ "grad_norm": 0.13594429624621016,
+ "learning_rate": 0.00020249819046446837,
+ "loss": 1.0683,
+ "step": 600
+ },
+ {
+ "epoch": 0.7387057387057387,
+ "grad_norm": 0.11897323140031583,
+ "learning_rate": 0.00019400137970373356,
+ "loss": 1.0715,
+ "step": 605
+ },
+ {
+ "epoch": 0.7448107448107448,
+ "grad_norm": 0.12134111257996806,
+ "learning_rate": 0.00018564356670455767,
+ "loss": 1.0737,
+ "step": 610
+ },
+ {
+ "epoch": 0.7509157509157509,
+ "grad_norm": 0.12412837845655514,
+ "learning_rate": 0.00017742854794581785,
+ "loss": 1.077,
+ "step": 615
+ },
+ {
+ "epoch": 0.757020757020757,
+ "grad_norm": 0.10995117277188224,
+ "learning_rate": 0.00016936005504309342,
+ "loss": 1.0725,
+ "step": 620
+ },
+ {
+ "epoch": 0.7631257631257631,
+ "grad_norm": 0.12324463281674919,
+ "learning_rate": 0.0001614417530536042,
+ "loss": 1.0714,
+ "step": 625
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 0.12422719561341983,
+ "learning_rate": 0.00015367723881138434,
+ "loss": 1.0764,
+ "step": 630
+ },
+ {
+ "epoch": 0.7753357753357754,
+ "grad_norm": 0.15097612554081813,
+ "learning_rate": 0.00014607003929344492,
+ "loss": 1.0735,
+ "step": 635
+ },
+ {
+ "epoch": 0.7814407814407814,
+ "grad_norm": 0.10792336331652043,
+ "learning_rate": 0.00013862361001766972,
+ "loss": 1.0656,
+ "step": 640
+ },
+ {
+ "epoch": 0.7875457875457875,
+ "grad_norm": 0.10878726224284914,
+ "learning_rate": 0.00013134133347316885,
+ "loss": 1.054,
+ "step": 645
+ },
+ {
+ "epoch": 0.7936507936507936,
+ "grad_norm": 0.10538532292466002,
+ "learning_rate": 0.0001242265175838072,
+ "loss": 1.0565,
+ "step": 650
+ },
+ {
+ "epoch": 0.7997557997557998,
+ "grad_norm": 0.132655865253661,
+ "learning_rate": 0.00011728239420560316,
+ "loss": 1.0623,
+ "step": 655
+ },
+ {
+ "epoch": 0.8058608058608059,
+ "grad_norm": 0.13898152967373176,
+ "learning_rate": 0.0001105121176586793,
+ "loss": 1.062,
+ "step": 660
+ },
+ {
+ "epoch": 0.811965811965812,
+ "grad_norm": 0.1284900261818914,
+ "learning_rate": 0.00010391876329443534,
+ "loss": 1.0573,
+ "step": 665
+ },
+ {
+ "epoch": 0.818070818070818,
+ "grad_norm": 0.12601548049011016,
+ "learning_rate": 9.750532609858991e-05,
+ "loss": 1.0627,
+ "step": 670
+ },
+ {
+ "epoch": 0.8241758241758241,
+ "grad_norm": 0.10148390380785619,
+ "learning_rate": 9.127471933073007e-05,
+ "loss": 1.0689,
+ "step": 675
+ },
+ {
+ "epoch": 0.8302808302808303,
+ "grad_norm": 0.11101937832472626,
+ "learning_rate": 8.522977320098224e-05,
+ "loss": 1.0592,
+ "step": 680
+ },
+ {
+ "epoch": 0.8363858363858364,
+ "grad_norm": 0.11000851597542352,
+ "learning_rate": 7.937323358440934e-05,
+ "loss": 1.0613,
+ "step": 685
+ },
+ {
+ "epoch": 0.8424908424908425,
+ "grad_norm": 0.13212021122536566,
+ "learning_rate": 7.370776077371622e-05,
+ "loss": 1.055,
+ "step": 690
+ },
+ {
+ "epoch": 0.8485958485958486,
+ "grad_norm": 0.12501157813798788,
+ "learning_rate": 6.82359282708292e-05,
+ "loss": 1.0644,
+ "step": 695
+ },
+ {
+ "epoch": 0.8547008547008547,
+ "grad_norm": 0.12182549104138736,
+ "learning_rate": 6.296022161790149e-05,
+ "loss": 1.0686,
+ "step": 700
+ },
+ {
+ "epoch": 0.8608058608058609,
+ "grad_norm": 0.10898863882704415,
+ "learning_rate": 5.78830372682721e-05,
+ "loss": 1.0595,
+ "step": 705
+ },
+ {
+ "epoch": 0.8669108669108669,
+ "grad_norm": 0.1292362091327554,
+ "learning_rate": 5.300668149789417e-05,
+ "loss": 1.0628,
+ "step": 710
+ },
+ {
+ "epoch": 0.873015873015873,
+ "grad_norm": 0.11135921961101819,
+ "learning_rate": 4.833336935772442e-05,
+ "loss": 1.0532,
+ "step": 715
+ },
+ {
+ "epoch": 0.8791208791208791,
+ "grad_norm": 0.1040842333058969,
+ "learning_rate": 4.386522366755169e-05,
+ "loss": 1.0548,
+ "step": 720
+ },
+ {
+ "epoch": 0.8852258852258852,
+ "grad_norm": 0.11114144762934852,
+ "learning_rate": 3.960427405172079e-05,
+ "loss": 1.0602,
+ "step": 725
+ },
+ {
+ "epoch": 0.8913308913308914,
+ "grad_norm": 0.10851073660711448,
+ "learning_rate": 3.5552456017189926e-05,
+ "loss": 1.0615,
+ "step": 730
+ },
+ {
+ "epoch": 0.8974358974358975,
+ "grad_norm": 0.1046722141969564,
+ "learning_rate": 3.171161007433937e-05,
+ "loss": 1.0542,
+ "step": 735
+ },
+ {
+ "epoch": 0.9035409035409036,
+ "grad_norm": 0.11710428457476799,
+ "learning_rate": 2.808348090093277e-05,
+ "loss": 1.0671,
+ "step": 740
+ },
+ {
+ "epoch": 0.9096459096459096,
+ "grad_norm": 0.11332319869107195,
+ "learning_rate": 2.466971654960931e-05,
+ "loss": 1.0611,
+ "step": 745
+ },
+ {
+ "epoch": 0.9157509157509157,
+ "grad_norm": 0.10374082585506901,
+ "learning_rate": 2.147186769926712e-05,
+ "loss": 1.0498,
+ "step": 750
+ },
+ {
+ "epoch": 0.9218559218559218,
+ "grad_norm": 0.11492942958557875,
+ "learning_rate": 1.8491386950677812e-05,
+ "loss": 1.0571,
+ "step": 755
+ },
+ {
+ "epoch": 0.927960927960928,
+ "grad_norm": 0.11080015958091284,
+ "learning_rate": 1.572962816665302e-05,
+ "loss": 1.0558,
+ "step": 760
+ },
+ {
+ "epoch": 0.9340659340659341,
+ "grad_norm": 0.117953492017612,
+ "learning_rate": 1.3187845857061508e-05,
+ "loss": 1.0462,
+ "step": 765
+ },
+ {
+ "epoch": 0.9401709401709402,
+ "grad_norm": 0.11023247026103157,
+ "learning_rate": 1.0867194608976228e-05,
+ "loss": 1.0731,
+ "step": 770
+ },
+ {
+ "epoch": 0.9462759462759462,
+ "grad_norm": 0.11752064314919194,
+ "learning_rate": 8.768728562211947e-06,
+ "loss": 1.058,
+ "step": 775
+ },
+ {
+ "epoch": 0.9523809523809523,
+ "grad_norm": 0.10584711974817956,
+ "learning_rate": 6.893400930488569e-06,
+ "loss": 1.0638,
+ "step": 780
+ },
+ {
+ "epoch": 0.9584859584859585,
+ "grad_norm": 0.12776920092469246,
+ "learning_rate": 5.242063568441313e-06,
+ "loss": 1.0633,
+ "step": 785
+ },
+ {
+ "epoch": 0.9645909645909646,
+ "grad_norm": 0.10955827972683771,
+ "learning_rate": 3.815466584670746e-06,
+ "loss": 1.0533,
+ "step": 790
+ },
+ {
+ "epoch": 0.9706959706959707,
+ "grad_norm": 0.11036039903845696,
+ "learning_rate": 2.6142580010117823e-06,
+ "loss": 1.0576,
+ "step": 795
+ },
+ {
+ "epoch": 0.9768009768009768,
+ "grad_norm": 0.10123727837311189,
+ "learning_rate": 1.6389834581739814e-06,
+ "loss": 1.0609,
+ "step": 800
+ },
+ {
+ "epoch": 0.9829059829059829,
+ "grad_norm": 0.11176827609005396,
+ "learning_rate": 8.900859678879769e-07,
+ "loss": 1.0575,
+ "step": 805
+ },
+ {
+ "epoch": 0.989010989010989,
+ "grad_norm": 0.10493373702681778,
+ "learning_rate": 3.6790571167061305e-07,
+ "loss": 1.0604,
+ "step": 810
+ },
+ {
+ "epoch": 0.9951159951159951,
+ "grad_norm": 0.11496441250413074,
+ "learning_rate": 7.26798862996092e-08,
+ "loss": 1.0534,
+ "step": 815
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 1.4183021783828735,
+ "eval_runtime": 107.3754,
+ "eval_samples_per_second": 195.501,
+ "eval_steps_per_second": 6.109,
+ "step": 819
+ },
+ {
+ "epoch": 1.0,
+ "step": 819,
+ "total_flos": 45247522406400.0,
+ "train_loss": 1.1312509008088536,
+ "train_runtime": 1838.4003,
+ "train_samples_per_second": 57.016,
+ "train_steps_per_second": 0.445
+ }
+ ],
+ "logging_steps": 5,
+ "max_steps": 819,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": false,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 45247522406400.0,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
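The logged learning rates trace transformers' cosine schedule with warmup: warmup_ratio 0.1 over 819 steps gives 82 warmup steps, consistent with the 1.2195e-05 logged at step 1 (0.001 / 82). A sketch reproducing a few logged values, mirroring get_cosine_schedule_with_warmup with its default half-cosine cycle:

```python
import math

# Reproduce the logged learning-rate curve from a cosine schedule with warmup
# (mirrors transformers' get_cosine_schedule_with_warmup, num_cycles = 0.5).
peak_lr = 0.001                              # from the README hyperparameters
total_steps = 819
warmup_steps = math.ceil(0.1 * total_steps)  # 82

def lr_at(step: int) -> float:
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))    # 1.2195e-05, matches the first log entry
print(lr_at(100))  # ~9.9853e-04, matches the entry at step 100
print(lr_at(500))  # ~3.9528e-04, matches the entry at step 500
```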
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcfe28d2c325010ce3e74d70f4ef93430765f8cff33274593c1b4345436ce872
+ size 6392
vocab.json ADDED
The diff for this file is too large to render. See raw diff