Gunslinger3D committed on
Commit
b83c80a
1 Parent(s): 5718af6

fine-tuning-Phi2-with-webglm-qa-with-lora_4

Browse files
Files changed (3) hide show
  1. README.md +51 -51
  2. adapter_config.json +5 -7
  3. adapter_model.safetensors +2 -2
README.md CHANGED
@@ -16,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
16
 
17
  This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
- - Loss: 0.1147
20
 
21
  ## Model description
22
 
@@ -51,56 +51,56 @@ The following hyperparameters were used during training:
51
 
52
  | Training Loss | Epoch | Step | Validation Loss |
53
  |:-------------:|:-----:|:----:|:---------------:|
54
- | 8.2444 | 0.2 | 10 | 7.8267 |
55
- | 7.4754 | 0.4 | 20 | 6.3605 |
56
- | 4.8314 | 0.6 | 30 | 3.1457 |
57
- | 1.7327 | 0.8 | 40 | 0.6363 |
58
- | 0.5438 | 1.0 | 50 | 0.5673 |
59
- | 0.4569 | 1.2 | 60 | 0.4906 |
60
- | 0.4491 | 1.39 | 70 | 0.4269 |
61
- | 0.367 | 1.59 | 80 | 0.3729 |
62
- | 0.2821 | 1.79 | 90 | 0.3323 |
63
- | 0.2414 | 1.99 | 100 | 0.3013 |
64
- | 0.2521 | 2.19 | 110 | 0.2772 |
65
- | 0.2135 | 2.39 | 120 | 0.2603 |
66
- | 0.1982 | 2.59 | 130 | 0.2446 |
67
- | 0.2186 | 2.79 | 140 | 0.2278 |
68
- | 0.1741 | 2.99 | 150 | 0.2144 |
69
- | 0.1781 | 3.19 | 160 | 0.2062 |
70
- | 0.1702 | 3.39 | 170 | 0.1928 |
71
- | 0.157 | 3.59 | 180 | 0.1846 |
72
- | 0.1469 | 3.78 | 190 | 0.1770 |
73
- | 0.1644 | 3.98 | 200 | 0.1705 |
74
- | 0.1458 | 4.18 | 210 | 0.1654 |
75
- | 0.1282 | 4.38 | 220 | 0.1623 |
76
- | 0.1537 | 4.58 | 230 | 0.1568 |
77
- | 0.1197 | 4.78 | 240 | 0.1509 |
78
- | 0.1327 | 4.98 | 250 | 0.1464 |
79
- | 0.1349 | 5.18 | 260 | 0.1436 |
80
- | 0.1052 | 5.38 | 270 | 0.1409 |
81
- | 0.127 | 5.58 | 280 | 0.1381 |
82
- | 0.1303 | 5.78 | 290 | 0.1365 |
83
- | 0.1063 | 5.98 | 300 | 0.1338 |
84
- | 0.1145 | 6.18 | 310 | 0.1300 |
85
- | 0.1101 | 6.37 | 320 | 0.1287 |
86
- | 0.1088 | 6.57 | 330 | 0.1280 |
87
- | 0.1062 | 6.77 | 340 | 0.1254 |
88
- | 0.1016 | 6.97 | 350 | 0.1238 |
89
- | 0.1005 | 7.17 | 360 | 0.1232 |
90
- | 0.1084 | 7.37 | 370 | 0.1220 |
91
- | 0.101 | 7.57 | 380 | 0.1204 |
92
- | 0.1065 | 7.77 | 390 | 0.1200 |
93
- | 0.0943 | 7.97 | 400 | 0.1191 |
94
- | 0.0848 | 8.17 | 410 | 0.1184 |
95
- | 0.0913 | 8.37 | 420 | 0.1175 |
96
- | 0.1115 | 8.57 | 430 | 0.1169 |
97
- | 0.091 | 8.76 | 440 | 0.1161 |
98
- | 0.1009 | 8.96 | 450 | 0.1154 |
99
- | 0.0966 | 9.16 | 460 | 0.1150 |
100
- | 0.0931 | 9.36 | 470 | 0.1147 |
101
- | 0.0922 | 9.56 | 480 | 0.1150 |
102
- | 0.0912 | 9.76 | 490 | 0.1148 |
103
- | 0.0915 | 9.96 | 500 | 0.1147 |
104
 
105
 
106
  ### Framework versions
 
16
 
17
  This model is a fine-tuned version of [microsoft/phi-2](https://huggingface.co/microsoft/phi-2) on an unknown dataset.
18
  It achieves the following results on the evaluation set:
19
+ - Loss: 0.2392
20
 
21
  ## Model description
22
 
 
51
 
52
  | Training Loss | Epoch | Step | Validation Loss |
53
  |:-------------:|:-----:|:----:|:---------------:|
54
+ | 8.19 | 0.2 | 10 | 7.9966 |
55
+ | 8.0261 | 0.4 | 20 | 7.7896 |
56
+ | 7.3527 | 0.6 | 30 | 7.2580 |
57
+ | 6.9568 | 0.8 | 40 | 5.9952 |
58
+ | 5.2411 | 1.0 | 50 | 3.7880 |
59
+ | 2.9772 | 1.2 | 60 | 1.8751 |
60
+ | 1.2384 | 1.39 | 70 | 0.7517 |
61
+ | 0.6916 | 1.59 | 80 | 0.6684 |
62
+ | 0.5669 | 1.79 | 90 | 0.6138 |
63
+ | 0.5195 | 1.99 | 100 | 0.5846 |
64
+ | 0.5281 | 2.19 | 110 | 0.5607 |
65
+ | 0.4764 | 2.39 | 120 | 0.5396 |
66
+ | 0.4655 | 2.59 | 130 | 0.5190 |
67
+ | 0.4787 | 2.79 | 140 | 0.4980 |
68
+ | 0.427 | 2.99 | 150 | 0.4765 |
69
+ | 0.41 | 3.19 | 160 | 0.4547 |
70
+ | 0.397 | 3.39 | 170 | 0.4317 |
71
+ | 0.3648 | 3.59 | 180 | 0.4087 |
72
+ | 0.3436 | 3.78 | 190 | 0.3863 |
73
+ | 0.3415 | 3.98 | 200 | 0.3661 |
74
+ | 0.3072 | 4.18 | 210 | 0.3481 |
75
+ | 0.2681 | 4.38 | 220 | 0.3341 |
76
+ | 0.3068 | 4.58 | 230 | 0.3201 |
77
+ | 0.2526 | 4.78 | 240 | 0.3095 |
78
+ | 0.2632 | 4.98 | 250 | 0.3003 |
79
+ | 0.2693 | 5.18 | 260 | 0.2936 |
80
+ | 0.2194 | 5.38 | 270 | 0.2874 |
81
+ | 0.2474 | 5.58 | 280 | 0.2826 |
82
+ | 0.2467 | 5.78 | 290 | 0.2770 |
83
+ | 0.2188 | 5.98 | 300 | 0.2726 |
84
+ | 0.2305 | 6.18 | 310 | 0.2690 |
85
+ | 0.2336 | 6.37 | 320 | 0.2643 |
86
+ | 0.2192 | 6.57 | 330 | 0.2614 |
87
+ | 0.2189 | 6.77 | 340 | 0.2588 |
88
+ | 0.2049 | 6.97 | 350 | 0.2564 |
89
+ | 0.2096 | 7.17 | 360 | 0.2540 |
90
+ | 0.221 | 7.37 | 370 | 0.2521 |
91
+ | 0.2167 | 7.57 | 380 | 0.2498 |
92
+ | 0.203 | 7.77 | 390 | 0.2484 |
93
+ | 0.1999 | 7.97 | 400 | 0.2469 |
94
+ | 0.1888 | 8.17 | 410 | 0.2458 |
95
+ | 0.195 | 8.37 | 420 | 0.2443 |
96
+ | 0.2358 | 8.57 | 430 | 0.2429 |
97
+ | 0.1929 | 8.76 | 440 | 0.2419 |
98
+ | 0.2066 | 8.96 | 450 | 0.2412 |
99
+ | 0.2101 | 9.16 | 460 | 0.2407 |
100
+ | 0.2009 | 9.36 | 470 | 0.2400 |
101
+ | 0.1976 | 9.56 | 480 | 0.2394 |
102
+ | 0.2013 | 9.76 | 490 | 0.2392 |
103
+ | 0.1956 | 9.96 | 500 | 0.2392 |
104
 
105
 
106
  ### Framework versions
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
- "base_model_name_or_path": null,
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
@@ -9,22 +9,20 @@
9
  "layers_pattern": null,
10
  "layers_to_transform": null,
11
  "loftq_config": {},
12
- "lora_alpha": 32,
13
  "lora_dropout": 0.05,
14
  "megatron_config": null,
15
  "megatron_core": "megatron.core",
16
  "modules_to_save": null,
17
  "peft_type": "LORA",
18
- "r": 16,
19
  "rank_pattern": {},
20
  "revision": null,
21
  "target_modules": [
22
  "q_proj",
23
- "k_proj",
24
  "v_proj",
25
- "fc1",
26
- "dense",
27
- "fc2"
28
  ],
29
  "task_type": "CAUSAL_LM"
30
  }
 
1
  {
2
  "alpha_pattern": {},
3
  "auto_mapping": null,
4
+ "base_model_name_or_path": "microsoft/phi-2",
5
  "bias": "none",
6
  "fan_in_fan_out": false,
7
  "inference_mode": true,
 
9
  "layers_pattern": null,
10
  "layers_to_transform": null,
11
  "loftq_config": {},
12
+ "lora_alpha": 16,
13
  "lora_dropout": 0.05,
14
  "megatron_config": null,
15
  "megatron_core": "megatron.core",
16
  "modules_to_save": null,
17
  "peft_type": "LORA",
18
+ "r": 8,
19
  "rank_pattern": {},
20
  "revision": null,
21
  "target_modules": [
22
  "q_proj",
 
23
  "v_proj",
24
+ "o_proj",
25
+ "k_proj"
 
26
  ],
27
  "task_type": "CAUSAL_LM"
28
  }
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:103eca31b19d0b8519424a6ce1f462ba1dc0d548234ecc1bf25988187a19c7c0
3
- size 94428896
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d874c79eebb737b0ee2e68dcfe6c2191dccad0bbcffcbdf1fea45bfd0f89a1eb
3
+ size 15754072