Erick03 committed on
Commit d684571
1 Parent(s): d647180

End of training

Files changed (5)
  1. README.md +7 -3
  2. all_results.json +9 -9
  3. eval_results.json +4 -4
  4. train_results.json +5 -5
  5. trainer_state.json +13 -13
README.md CHANGED
@@ -1,17 +1,21 @@
 ---
 base_model: HuggingFaceTB/SmolLM2-135M-Instruct
+datasets: Crimsoin/OTC_Medicine_PH_v2
 library_name: transformers
-model_name: HFTB-SmolLM2-135M-Instruct-OTCMedicinePHv2
+model_name: HuggingFaceTB/SmolLM2-135M-Instruct
 tags:
 - generated_from_trainer
+- question-answering
+- QA
+- text-generation
 - trl
 - sft
 licence: license
 ---
 
-# Model Card for HFTB-SmolLM2-135M-Instruct-OTCMedicinePHv2
+# Model Card for HuggingFaceTB/SmolLM2-135M-Instruct
 
-This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-135M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct).
+This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-135M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct) on the [Crimsoin/OTC_Medicine_PH_v2](https://huggingface.co/datasets/Crimsoin/OTC_Medicine_PH_v2) dataset.
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
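The README's "## Quick start" section continues beyond this hunk and is not shown in the diff. For context, a minimal sketch of the usual TRL-generated quick-start pattern for loading such a checkpoint; the repo id `Erick03/HFTB-SmolLM2-135M-Instruct-OTCMedicinePHv2` is an assumption based on the committer name and the output directory in trainer_state.json, not a confirmed location.

```python
# Hedged sketch, not the README's exact contents: load the fine-tuned model
# through the transformers text-generation pipeline and ask a question using
# the chat format that SmolLM2-Instruct expects. The repo id is an assumption.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="Erick03/HFTB-SmolLM2-135M-Instruct-OTCMedicinePHv2",  # assumed repo id
)

question = "What is the recommended adult dose of paracetamol?"
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])
```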
all_results.json CHANGED
@@ -1,12 +1,12 @@
 {
     "epoch": 3.883495145631068,
-    "eval_loss": 0.1291082799434662,
-    "eval_runtime": 14.8078,
-    "eval_samples_per_second": 35.792,
-    "eval_steps_per_second": 4.525,
-    "total_flos": 258445456835328.0,
-    "train_loss": 0.47970603942871093,
-    "train_runtime": 930.7066,
-    "train_samples_per_second": 5.157,
-    "train_steps_per_second": 0.107
+    "eval_loss": 0.14737741649150848,
+    "eval_runtime": 14.0975,
+    "eval_samples_per_second": 37.595,
+    "eval_steps_per_second": 4.753,
+    "total_flos": 258162252195456.0,
+    "train_loss": 0.4854153060913086,
+    "train_runtime": 885.215,
+    "train_samples_per_second": 5.422,
+    "train_steps_per_second": 0.113
 }
eval_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 3.883495145631068,
-    "eval_loss": 0.1291082799434662,
-    "eval_runtime": 14.8078,
-    "eval_samples_per_second": 35.792,
-    "eval_steps_per_second": 4.525
+    "eval_loss": 0.14737741649150848,
+    "eval_runtime": 14.0975,
+    "eval_samples_per_second": 37.595,
+    "eval_steps_per_second": 4.753
 }
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 3.883495145631068,
-    "total_flos": 258445456835328.0,
-    "train_loss": 0.47970603942871093,
-    "train_runtime": 930.7066,
-    "train_samples_per_second": 5.157,
-    "train_steps_per_second": 0.107
+    "total_flos": 258162252195456.0,
+    "train_loss": 0.4854153060913086,
+    "train_runtime": 885.215,
+    "train_samples_per_second": 5.422,
+    "train_steps_per_second": 0.113
 }
trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.12910962104797363,
+  "best_metric": 0.14742426574230194,
   "best_model_checkpoint": "./HFTB-SmolLM2-135M-Instruct-OTCMedicinePHv2\\checkpoint-100",
   "epoch": 3.883495145631068,
   "eval_steps": 100,
@@ -10,27 +10,27 @@
   "log_history": [
     {
       "epoch": 3.883495145631068,
-      "grad_norm": 0.26728370785713196,
+      "grad_norm": 0.20863692462444305,
       "learning_rate": 0.0005,
-      "loss": 0.4797,
+      "loss": 0.4854,
       "step": 100
     },
     {
       "epoch": 3.883495145631068,
-      "eval_loss": 0.12910962104797363,
-      "eval_runtime": 14.3923,
-      "eval_samples_per_second": 36.825,
-      "eval_steps_per_second": 4.655,
+      "eval_loss": 0.14742426574230194,
+      "eval_runtime": 13.1852,
+      "eval_samples_per_second": 40.197,
+      "eval_steps_per_second": 5.081,
       "step": 100
     },
     {
       "epoch": 3.883495145631068,
       "step": 100,
-      "total_flos": 258445456835328.0,
-      "train_loss": 0.47970603942871093,
-      "train_runtime": 930.7066,
-      "train_samples_per_second": 5.157,
-      "train_steps_per_second": 0.107
+      "total_flos": 258162252195456.0,
+      "train_loss": 0.4854153060913086,
+      "train_runtime": 885.215,
+      "train_samples_per_second": 5.422,
+      "train_steps_per_second": 0.113
     }
   ],
   "logging_steps": 100,
@@ -59,7 +59,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 258445456835328.0,
+  "total_flos": 258162252195456.0,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null