tanliboy committed
Commit 43e2c82
1 Parent(s): ba96ecf

End of training

README.md CHANGED
@@ -3,10 +3,16 @@ library_name: transformers
 license: llama3.2
 base_model: tanliboy/llama-3.2-3b
 tags:
+- alignment-handbook
+- trl
+- sft
+- generated_from_trainer
 - trl
 - sft
 - alignment-handbook
 - generated_from_trainer
+datasets:
+- tanliboy/OpenHermes-2.5-reformat
 model-index:
 - name: llama-3.2-3b-sft
   results: []
@@ -17,7 +23,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # llama-3.2-3b-sft
 
-This model is a fine-tuned version of [tanliboy/llama-3.2-3b](https://huggingface.co/tanliboy/llama-3.2-3b) on an unknown dataset.
+This model is a fine-tuned version of [tanliboy/llama-3.2-3b](https://huggingface.co/tanliboy/llama-3.2-3b) on the tanliboy/OpenHermes-2.5-reformat dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.7216
 
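The updated card now names both the base checkpoint and the SFT dataset. As an illustration, here is a minimal loading sketch for the resulting checkpoint; the repo id tanliboy/llama-3.2-3b-sft and the presence of a chat template are assumptions, not details recorded in this commit.

```python
# Minimal sketch: load the SFT checkpoint described in this model card and generate a reply.
# The repo id and the availability of a chat template are assumptions, not confirmed by this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tanliboy/llama-3.2-3b-sft"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Summarize what supervised fine-tuning does."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=128)
# Decode only the newly generated tokens, not the prompt.
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```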
all_results.json CHANGED
@@ -1,10 +1,10 @@
 {
     "epoch": 1.0,
-    "eval_loss": 0.7042345404624939,
-    "eval_runtime": 379.064,
+    "eval_loss": 0.7215889096260071,
+    "eval_runtime": 406.2872,
     "eval_samples": 50077,
-    "eval_samples_per_second": 132.107,
-    "eval_steps_per_second": 2.066,
+    "eval_samples_per_second": 123.255,
+    "eval_steps_per_second": 1.927,
     "total_flos": 250303561007104.0,
     "train_loss": 0.7492096503219262,
     "train_runtime": 18007.2993,
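The throughput fields above follow directly from the sample count and runtime; a quick arithmetic check of the updated figures (all values copied from this diff):

```python
# Sanity check of the updated all_results.json numbers recorded in this commit.
eval_samples, eval_runtime = 50077, 406.2872
print(eval_samples / eval_runtime)   # ~123.25 samples/s, matching eval_samples_per_second = 123.255

train_runtime = 18007.2993
print(train_runtime / 3600)          # ~5.0 hours of training for the single epoch
```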
config.json CHANGED
@@ -35,6 +35,6 @@
     "tie_word_embeddings": true,
     "torch_dtype": "bfloat16",
     "transformers_version": "4.44.2",
-    "use_cache": false,
+    "use_cache": true,
     "vocab_size": 128256
 }
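The only change here flips use_cache back to true, which lets generation reuse cached key/value states; training with gradient checkpointing commonly disables it, so this reads as a post-training restore (an inference from the diff, not stated in the commit). A minimal sketch of reading the flag back, again assuming the tanliboy/llama-3.2-3b-sft repo id:

```python
# use_cache=True lets generate() reuse cached key/value states instead of recomputing
# attention over the full prefix at every decoding step.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("tanliboy/llama-3.2-3b-sft")  # assumed repo id
print(config.use_cache)  # True after this commit

# The flag can still be overridden per call if needed:
# model.generate(input_ids, use_cache=False)
```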
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 1.0,
-    "eval_loss": 0.7042345404624939,
-    "eval_runtime": 379.064,
+    "eval_loss": 0.7215889096260071,
+    "eval_runtime": 406.2872,
     "eval_samples": 50077,
-    "eval_samples_per_second": 132.107,
-    "eval_steps_per_second": 2.066
+    "eval_samples_per_second": 123.255,
+    "eval_steps_per_second": 1.927
 }
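Since eval_loss is a mean per-token cross-entropy, it converts directly to perplexity; for the value recorded here:

```python
# Perplexity implied by the updated eval_loss in this commit.
import math

eval_loss = 0.7215889096260071
print(math.exp(eval_loss))  # ~2.06 perplexity on the evaluation split
```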
runs/Sep27_20-11-03_action-graph-trainer/events.out.tfevents.1727486849.action-graph-trainer.430650.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:894ced3b0de082b8f2ee2e374b2f08a5746b5ff63fce46384b75744474f50a1f
+size 359