Commit 246cb21
Parent(s): 0b1b216

Upload folder using huggingface_hub

Files changed:
- checkpoint-188/config.json +28 -0
- checkpoint-188/generation_config.json +6 -0
- checkpoint-188/model.safetensors +3 -0
- checkpoint-188/optimizer.bin +3 -0
- checkpoint-188/pytorch_model_fsdp.bin +3 -0
- checkpoint-188/rng_state_0.pth +3 -0
- checkpoint-188/rng_state_1.pth +3 -0
- checkpoint-188/rng_state_2.pth +3 -0
- checkpoint-188/rng_state_3.pth +3 -0
- checkpoint-188/scheduler.pt +3 -0
- checkpoint-188/trainer_state.json +50 -0
- checkpoint-188/training_args.bin +3 -0
- checkpoint-376/config.json +28 -0
- checkpoint-376/generation_config.json +6 -0
- checkpoint-376/model.safetensors +3 -0
- checkpoint-376/optimizer.bin +3 -0
- checkpoint-376/pytorch_model_fsdp.bin +3 -0
- checkpoint-376/rng_state_0.pth +3 -0
- checkpoint-376/rng_state_1.pth +3 -0
- checkpoint-376/rng_state_2.pth +3 -0
- checkpoint-376/rng_state_3.pth +3 -0
- checkpoint-376/scheduler.pt +3 -0
- checkpoint-376/trainer_state.json +86 -0
- checkpoint-376/training_args.bin +3 -0
- config.json +28 -0
- generation_config.json +6 -0
- model.safetensors +3 -0
- training_args.bin +3 -0
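
A commit with this message is what huggingface_hub's upload_folder produces. Below is a minimal sketch of the call that would create it; the repo id is hypothetical (the target repo is not shown in this view), and the folder path is inferred from the best_model_checkpoint path recorded in trainer_state.json:

from huggingface_hub import upload_folder

# Hypothetical repo id; substitute the repo this commit belongs to.
upload_folder(
    repo_id="user/Qwen1.5-0.5B-dpo-mix-7k-ORPO",
    folder_path="./checkpoints/dpo-mix-7k/Qwen1.5-0.5B-dpo-mix-7k-lambda1.0-ORPO-29-9-20",
    commit_message="Upload folder using huggingface_hub",
)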
checkpoint-188/config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "Qwen/Qwen1.5-0.5B",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 2816,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 21,
+  "model_type": "qwen2",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 16,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 1000000.0,
+  "sliding_window": 32768,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.39.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
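
The config above is the stock Qwen/Qwen1.5-0.5B configuration (24 layers, hidden size 1024, tied embeddings, bfloat16). A minimal sketch of reading it back with transformers, assuming the checkpoint directory has been downloaded locally:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("checkpoint-188")
print(config.model_type)           # "qwen2"
print(config.num_hidden_layers)    # 24
print(config.tie_word_embeddings)  # True: the LM head shares the embedding matrix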
checkpoint-188/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "max_new_tokens": 2048,
+  "transformers_version": "4.39.2"
+}
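
generation_config.json only pins the special tokens and a default max_new_tokens of 2048. A sketch of loading it on its own, under the same local-download assumption:

from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("checkpoint-188")
print(gen_config.max_new_tokens)  # 2048
print(gen_config.eos_token_id)    # 151643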
checkpoint-188/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bf6bd33f164dcb0e408e712535d3eae23124a893713157c056c30d58707068a5
+size 1239173352
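
Like all large binaries in this commit, model.safetensors is stored as a Git LFS pointer: the repo tracks only a sha256 "oid" and a byte size, and the payload lives in LFS storage. A sketch of verifying a downloaded file against the pointer values above:

import hashlib

def verify_lfs_object(path, expected_oid, expected_size):
    # Hash the file in 1 MiB chunks so large weight files never need to fit in memory.
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

print(verify_lfs_object(
    "checkpoint-188/model.safetensors",
    "bf6bd33f164dcb0e408e712535d3eae23124a893713157c056c30d58707068a5",
    1239173352,
))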
checkpoint-188/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fefbc6315f53ae53ae2d0d1b59ebfbf975a76007140fc31f0461d957896138db
+size 3712102842
checkpoint-188/pytorch_model_fsdp.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8360afcfd63bdfde022670dd9b6ab85040fcff6d7c95604f813ad0aa9ce7db42
+size 1239275353
checkpoint-188/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:308f94f9a5c24e1bad5c393d56ae7af7782600f4e791d9c6ac35b22fff2105b6
+size 15024
checkpoint-188/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b056f3c23cb32dc77a2ec9e7651e0b64e4440e21f0fdf969b86bfc56a1cbdf06
+size 15024
checkpoint-188/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3f8a05714bc528f4885a2816181652f2303b3e8150f89b56aaee6bec56aa520
+size 15024
checkpoint-188/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f755bd3c330281961e5c03af9d10ce8c1e1678619d384f6f1fd5fd7dce2ff50
+size 15024
checkpoint-188/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09909eb413267318bbfda18c09a55769856bdd30674e675b287d21ecf89be94b
+size 1064
checkpoint-188/trainer_state.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "best_metric": 1.157950758934021,
+  "best_model_checkpoint": "./checkpoints/dpo-mix-7k/Qwen1.5-0.5B-dpo-mix-7k-lambda1.0-ORPO-29-9-20/checkpoint-188",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 188,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.27,
+      "grad_norm": 676.0,
+      "learning_rate": 2.5e-06,
+      "loss": 15.3348,
+      "step": 50
+    },
+    {
+      "epoch": 0.53,
+      "grad_norm": 16.875,
+      "learning_rate": 5e-06,
+      "loss": 3.911,
+      "step": 100
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.75,
+      "learning_rate": 4.6059265481735295e-06,
+      "loss": 1.1674,
+      "step": 150
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 1.157950758934021,
+      "eval_runtime": 34.0907,
+      "eval_samples_per_second": 16.573,
+      "eval_steps_per_second": 1.056,
+      "step": 188
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 376,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "total_flos": 2849842200051712.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
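
trainer_state.json records the run so far: step 188 is exactly one of two epochs (max_steps 376), training loss falls from 15.33 at step 50 to 1.17 by step 150, and eval loss at the epoch boundary is 1.158. A sketch of pulling that curve out of log_history:

import json

with open("checkpoint-188/trainer_state.json") as f:
    state = json.load(f)

for record in state["log_history"]:
    if "loss" in record:         # training log entries (every 50 steps here)
        print(f"step {record['step']:>4}  train loss {record['loss']:.4f}")
    elif "eval_loss" in record:  # evaluation entries
        print(f"step {record['step']:>4}  eval loss  {record['eval_loss']:.4f}")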
checkpoint-188/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eed0362465920150b6a43b7c66f144cfd17c4f74e74d2787ed17f2ec04d4be42
+size 5048
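
training_args.bin is the pickled TrainingArguments object the Trainer saves alongside each checkpoint. A sketch of inspecting it, assuming a matching transformers version is installed (unpickling needs the class definition; on recent PyTorch, weights_only=False is required because the file holds an arbitrary Python object rather than tensors):

import torch

args = torch.load("checkpoint-188/training_args.bin", weights_only=False)
print(args.learning_rate)
print(args.per_device_train_batch_size)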
checkpoint-376/config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "Qwen/Qwen1.5-0.5B",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 2816,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 21,
+  "model_type": "qwen2",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 16,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 1000000.0,
+  "sliding_window": 32768,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.39.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
checkpoint-376/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "max_new_tokens": 2048,
+  "transformers_version": "4.39.2"
+}
checkpoint-376/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96fbb01c716da162eb7cda2ac6e39edf62470abf119a7e8098d6dd86bd0c5b17
+size 1239173352
checkpoint-376/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28a1365a7fd5e576d118f36c7a171195056090c2e15c019fc224cd8f63945cfe
+size 3712103162
checkpoint-376/pytorch_model_fsdp.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10c0e9381942dde5d3683a4aa11fbb5c321a8630ed0c6e06456a6557952d52c8
+size 1239275353
checkpoint-376/rng_state_0.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:418a5f105ae834c3075024076916b2a9475918fe034c12d0dd5b6d91f1aba467
+size 15024
checkpoint-376/rng_state_1.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e07ace389d24bc1307b74f42a1e7b8f0117b0db853e2df64ff3f15cb92916a2
+size 15024
checkpoint-376/rng_state_2.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da6a990f346d7014dffb28fa2bc7d3b890bd3c53712503fce3656da48d3d6e50
+size 15024
checkpoint-376/rng_state_3.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e95f356ca38179b05993f55daece0223e96fa10b9a1b9ea2102a739211333f63
+size 15024
checkpoint-376/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:beb76bbd6a34792885f955f41f4911956f6dde63e9bca79169199742ef22e6ce
+size 1064
checkpoint-376/trainer_state.json
ADDED
@@ -0,0 +1,86 @@
+{
+  "best_metric": 1.1512998342514038,
+  "best_model_checkpoint": "./checkpoints/dpo-mix-7k/Qwen1.5-0.5B-dpo-mix-7k-lambda1.0-ORPO-29-9-20/checkpoint-376",
+  "epoch": 2.0,
+  "eval_steps": 500,
+  "global_step": 376,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.27,
+      "grad_norm": 676.0,
+      "learning_rate": 2.5e-06,
+      "loss": 15.3348,
+      "step": 50
+    },
+    {
+      "epoch": 0.53,
+      "grad_norm": 16.875,
+      "learning_rate": 5e-06,
+      "loss": 3.911,
+      "step": 100
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 5.75,
+      "learning_rate": 4.6059265481735295e-06,
+      "loss": 1.1674,
+      "step": 150
+    },
+    {
+      "epoch": 1.0,
+      "eval_loss": 1.157950758934021,
+      "eval_runtime": 34.0907,
+      "eval_samples_per_second": 16.573,
+      "eval_steps_per_second": 1.056,
+      "step": 188
+    },
+    {
+      "epoch": 1.06,
+      "grad_norm": 6.46875,
+      "learning_rate": 3.5479413010416606e-06,
+      "loss": 1.1863,
+      "step": 200
+    },
+    {
+      "epoch": 1.33,
+      "grad_norm": 7.78125,
+      "learning_rate": 2.159583377259384e-06,
+      "loss": 1.1619,
+      "step": 250
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 5.46875,
+      "learning_rate": 8.785447763431101e-07,
+      "loss": 1.1292,
+      "step": 300
+    },
+    {
+      "epoch": 1.86,
+      "grad_norm": 6.71875,
+      "learning_rate": 1.0868414100166314e-07,
+      "loss": 1.1423,
+      "step": 350
+    },
+    {
+      "epoch": 2.0,
+      "eval_loss": 1.1512998342514038,
+      "eval_runtime": 34.1097,
+      "eval_samples_per_second": 16.564,
+      "eval_steps_per_second": 1.055,
+      "step": 376
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 376,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "total_flos": 5699684433657856.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-376/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eed0362465920150b6a43b7c66f144cfd17c4f74e74d2787ed17f2ec04d4be42
+size 5048
config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "Qwen/Qwen1.5-0.5B",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 2816,
+  "max_position_embeddings": 32768,
+  "max_window_layers": 21,
+  "model_type": "qwen2",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "num_key_value_heads": 16,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 1000000.0,
+  "sliding_window": 32768,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.39.2",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 151936
+}
generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token_id": 151643,
+  "eos_token_id": 151643,
+  "max_new_tokens": 2048,
+  "transformers_version": "4.39.2"
+}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96fbb01c716da162eb7cda2ac6e39edf62470abf119a7e8098d6dd86bd0c5b17
+size 1239173352
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eed0362465920150b6a43b7c66f144cfd17c4f74e74d2787ed17f2ec04d4be42
+size 5048
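
The root-level config.json, generation_config.json, and model.safetensors mirror checkpoint-376 (same LFS oid), so the final model loads straight from the repo root. A sketch of end-to-end use; the repo id is hypothetical, and since this commit ships no tokenizer files, the tokenizer is assumed to come from the base model named in the config:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "user/Qwen1.5-0.5B-dpo-mix-7k-ORPO"  # hypothetical; substitute the real repo id

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16)

inputs = tokenizer("Hello!", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))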