ucmp137538 committed on
Commit
350dbc1
1 Parent(s): 6729cbf

End of training

Browse files
README.md CHANGED
@@ -17,7 +17,7 @@ should probably proofread and complete it, then remove this comment. -->
17
 
18
  This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the None dataset.
19
  It achieves the following results on the evaluation set:
20
- - Loss: 1.0580
21
 
22
  ## Model description
23
 
@@ -50,10 +50,10 @@ The following hyperparameters were used during training:
50
 
51
  | Training Loss | Epoch | Step | Validation Loss |
52
  |:-------------:|:-----:|:----:|:---------------:|
53
- | 1.2103 | 1.0 | 694 | 1.0155 |
54
- | 1.0049 | 2.0 | 1388 | 1.0118 |
55
- | 0.7344 | 3.0 | 2082 | 1.0341 |
56
- | 0.603 | 4.0 | 2776 | 1.0580 |
57
 
58
 
59
  ### Framework versions
 
17
 
18
  This model is a fine-tuned version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) on the None dataset.
19
  It achieves the following results on the evaluation set:
20
+ - Loss: 1.0668
21
 
22
  ## Model description
23
 
 
50
 
51
  | Training Loss | Epoch | Step | Validation Loss |
52
  |:-------------:|:-----:|:----:|:---------------:|
53
+ | 1.166 | 1.0 | 694 | 1.0270 |
54
+ | 1.0118 | 2.0 | 1388 | 1.0216 |
55
+ | 0.7775 | 3.0 | 2082 | 1.0420 |
56
+ | 0.6204 | 4.0 | 2776 | 1.0668 |
57
 
58
 
59
  ### Framework versions
adapter_config.json CHANGED
@@ -19,8 +19,8 @@
19
  "rank_pattern": {},
20
  "revision": null,
21
  "target_modules": [
22
- "q_proj",
23
- "v_proj"
24
  ],
25
  "task_type": "CAUSAL_LM",
26
  "use_rslora": false
 
19
  "rank_pattern": {},
20
  "revision": null,
21
  "target_modules": [
22
+ "v_proj",
23
+ "q_proj"
24
  ],
25
  "task_type": "CAUSAL_LM",
26
  "use_rslora": false
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:914b9e0c1ce4fe87d719ddb8f73c9ebee304242bf47f21111902f493bdf78491
3
  size 1182877280
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31538b96d7754c6b768c5a43f817f09bd574546c87981b1060d7b4d72f5e9bb5
3
  size 1182877280
runs/Mar25_03-51-16_41c6ac6af299/events.out.tfevents.1711338681.41c6ac6af299.3195.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75797c67dcce0d05eaadfad71fc65ecac11bc186125437d2820559b1c838cc4f
3
+ size 23634
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
1
  {
2
  "version": "1.0",
3
- "truncation": null,
 
 
 
 
 
4
  "padding": null,
5
  "added_tokens": [
6
  {
 
1
  {
2
  "version": "1.0",
3
+ "truncation": {
4
+ "direction": "Right",
5
+ "max_length": 1024,
6
+ "strategy": "LongestFirst",
7
+ "stride": 0
8
+ },
9
  "padding": null,
10
  "added_tokens": [
11
  {
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bc96f5993dbccf3433c416ff2bfa43d56581d187d092ac4fef7750fdaac3e828
3
  size 4728
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c47e0cb83060c1e51084db6f675bedf6c76e89403672c930731d185f41e6d76
3
  size 4728