yongtaek-lim committed
Commit
fc4be0a
1 Parent(s): 4b8b974

End of training

README.md CHANGED
@@ -6,6 +6,8 @@ tags:
 - llama-factory
 - full
 - generated_from_trainer
+metrics:
+- accuracy
 model-index:
 - name: pogny
   results: []
@@ -16,7 +18,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # pogny
 
-This model is a fine-tuned version of [MLP-KTLim/llama-3-Korean-Bllossom-8B](https://huggingface.co/MLP-KTLim/llama-3-Korean-Bllossom-8B) on an unknown dataset.
+This model is a fine-tuned version of [MLP-KTLim/llama-3-Korean-Bllossom-8B](https://huggingface.co/MLP-KTLim/llama-3-Korean-Bllossom-8B) on the alpaca_en_demo dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.2285
+- Accuracy: 0.6567
 
 ## Model description
 
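The updated model card describes the fine-tuned checkpoint produced by this run. As a minimal usage sketch (not part of this commit), it could be loaded with `transformers` roughly as follows; the repo id `yongtaek-lim/pogny` is an assumption inferred from the committer and model name, not stated in the diff:

```python
# Minimal usage sketch for the fine-tuned checkpoint described in the README.
# ASSUMPTION: the repo id is inferred from the committer/model name, not from the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "yongtaek-lim/pogny"  # hypothetical repo id; adjust to the actual location
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")

# alpaca_en_demo-style instruction prompt
prompt = "Give three tips for staying healthy."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```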
all_results.json CHANGED
@@ -2,12 +2,12 @@
     "epoch": 0.8,
     "eval_accuracy": 0.6567137677288996,
     "eval_loss": 1.2285137176513672,
-    "eval_runtime": 0.9891,
-    "eval_samples_per_second": 7.077,
-    "eval_steps_per_second": 3.033,
+    "eval_runtime": 0.9711,
+    "eval_samples_per_second": 7.208,
+    "eval_steps_per_second": 3.089,
     "total_flos": 18070241280.0,
     "train_loss": 1.165506362915039,
-    "train_runtime": 157.3565,
-    "train_samples_per_second": 0.35,
-    "train_steps_per_second": 0.006
+    "train_runtime": 221.1267,
+    "train_samples_per_second": 0.249,
+    "train_steps_per_second": 0.005
 }
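The throughput fields above follow directly from the runtimes: samples_per_second is roughly the number of samples divided by the runtime, so multiplying the two back out recovers the approximate sample counts. A quick sanity-check sketch, assuming the updated all_results.json is available locally:

```python
# Sanity-check sketch: recover approximate sample counts from the reported throughput.
# Assumes the updated all_results.json from this commit sits in the working directory.
import json

with open("all_results.json") as f:
    results = json.load(f)

# samples_per_second ~= n_samples / runtime  =>  runtime * samples_per_second ~= n_samples
eval_samples = results["eval_runtime"] * results["eval_samples_per_second"]      # ~7
train_samples = results["train_runtime"] * results["train_samples_per_second"]   # ~55
print(f"approx. eval samples:  {eval_samples:.1f}")
print(f"approx. train samples: {train_samples:.1f}")
```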
eval_results.json CHANGED
@@ -2,7 +2,7 @@
     "epoch": 0.8,
     "eval_accuracy": 0.6567137677288996,
     "eval_loss": 1.2285137176513672,
-    "eval_runtime": 0.9891,
-    "eval_samples_per_second": 7.077,
-    "eval_steps_per_second": 3.033
+    "eval_runtime": 0.9711,
+    "eval_samples_per_second": 7.208,
+    "eval_steps_per_second": 3.089
 }
runs/Aug28_02-50-12_main1/events.out.tfevents.1724814009.main1.35304.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:150304073ee76447f6c5038cafb48c2a7dc5ceaf3d357572870df1f1c30c52a5
+size 357
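The three lines added above are only the Git LFS pointer (version, oid, size) for the new TensorBoard event file, not the event data itself. As a sketch, once the real file has been fetched (e.g. with `git lfs pull`), its scalars could be read with TensorBoard's EventAccumulator:

```python
# Sketch: dump scalar metrics from the added tfevents file.
# Requires the actual binary file (the diff only adds an LFS pointer) and the
# `tensorboard` package; the run directory path is taken from this commit.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "runs/Aug28_02-50-12_main1"
acc = EventAccumulator(run_dir)
acc.Reload()  # parse all event files found under run_dir

for tag in acc.Tags().get("scalars", []):
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```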
train_results.json CHANGED
@@ -2,7 +2,7 @@
     "epoch": 0.8,
     "total_flos": 18070241280.0,
     "train_loss": 1.165506362915039,
-    "train_runtime": 157.3565,
-    "train_samples_per_second": 0.35,
-    "train_steps_per_second": 0.006
+    "train_runtime": 221.1267,
+    "train_samples_per_second": 0.249,
+    "train_steps_per_second": 0.005
 }
trainer_state.json CHANGED
@@ -13,9 +13,9 @@
       "step": 1,
       "total_flos": 18070241280.0,
       "train_loss": 1.165506362915039,
-      "train_runtime": 157.3565,
-      "train_samples_per_second": 0.35,
-      "train_steps_per_second": 0.006
+      "train_runtime": 221.1267,
+      "train_samples_per_second": 0.249,
+      "train_steps_per_second": 0.005
     }
   ],
   "logging_steps": 10,