dmariko committed
Commit 75f1b0e (1 parent: 12ff2ee)

SmolLM-360M-Instruct_fsdp_qlora_nf4_adapter-plaba

README.md CHANGED
```diff
@@ -1,10 +1,11 @@
 ---
+license: apache-2.0
 library_name: peft
 tags:
 - trl
 - sft
 - generated_from_trainer
-base_model: dmariko/SmolLM-360M-Instruct_qlora_nf4_merged
+base_model: HuggingFaceTB/SmolLM-360M-Instruct
 datasets:
 - generator
 model-index:
@@ -17,9 +18,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # SmolLM_360M_Instruct_qlora_nf4-plaba
 
-This model is a fine-tuned version of [dmariko/SmolLM-360M-Instruct_qlora_nf4_merged](https://huggingface.co/dmariko/SmolLM-360M-Instruct_qlora_nf4_merged) on the generator dataset.
+This model is a fine-tuned version of [HuggingFaceTB/SmolLM-360M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM-360M-Instruct) on the generator dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.7869
+- Loss: 1.8521
 
 ## Model description
 
@@ -44,8 +45,8 @@ The following hyperparameters were used during training:
 - seed: 42
 - distributed_type: multi-GPU
 - num_devices: 8
-- gradient_accumulation_steps: 8
-- total_train_batch_size: 256
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 128
 - total_eval_batch_size: 32
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: cosine
@@ -56,15 +57,15 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| No log | 0.8889 | 1 | 1.8457 |
-| No log | 1.7778 | 2 | 1.8424 |
-| No log | 2.6667 | 3 | 1.8318 |
-| No log | 3.5556 | 4 | 1.8130 |
-| No log | 4.4444 | 5 | 1.7966 |
-| No log | 5.3333 | 6 | 1.7906 |
-| No log | 6.2222 | 7 | 1.7891 |
-| No log | 8.0 | 9 | 1.7869 |
-| 1.8097 | 8.8889 | 10 | 1.7869 |
+| No log | 0.8889 | 2 | 2.0708 |
+| No log | 1.7778 | 4 | 2.0152 |
+| No log | 2.6667 | 6 | 1.9361 |
+| No log | 4.0 | 9 | 1.8851 |
+| 1.9803 | 4.8889 | 11 | 1.8728 |
+| 1.9803 | 5.7778 | 13 | 1.8640 |
+| 1.9803 | 6.6667 | 15 | 1.8571 |
+| 1.9803 | 8.0 | 18 | 1.8525 |
+| 1.8574 | 8.8889 | 20 | 1.8521 |
 
 
 ### Framework versions
```
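The hyperparameter edit is internally consistent: with 8 devices and the per-device batch size of 4 implied by both totals, 4 × 8 devices × 8 accumulation steps = 256 before, and 4 × 8 × 4 = 128 after. To try the resulting adapter, here is a minimal loading sketch; the adapter repo id is taken from the commit title and is an assumption, as is the NF4 4-bit setup suggested by the "qlora_nf4" naming.

```python
# Minimal loading sketch. The adapter repo id below is assumed from the
# commit title, and the NF4 quantization mirrors the "qlora_nf4" naming.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_id = "HuggingFaceTB/SmolLM-360M-Instruct"
adapter_id = "dmariko/SmolLM-360M-Instruct_fsdp_qlora_nf4_adapter-plaba"  # assumed

# 4-bit NF4 quantization, as used during QLoRA training.
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, quantization_config=bnb)
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA weights
```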
adapter_config.json CHANGED
```diff
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "dmariko/SmolLM-360M-Instruct_qlora_nf4_merged",
+  "base_model_name_or_path": "HuggingFaceTB/SmolLM-360M-Instruct",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "q_proj",
+    "gate_proj",
     "up_proj",
     "k_proj",
     "o_proj",
-    "v_proj",
     "down_proj",
-    "q_proj",
-    "gate_proj"
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
```
adapter_model.safetensors CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d44f8402fd6da2794b9ae0093ebc0f4d9973d8dbbeac3d1e09bafb097f7dd14f
+oid sha256:ac340124448b9a18bc5250faa0c2829d03be7f39f7b70eb21464d72f92f961f7
 size 17426248
```
runs/Sep11_13-47-01_algo-1/events.out.tfevents.1726062438.algo-1.67.0 ADDED
```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:446c05a60215813ef3bb641ab1b8609b638bb6a519bfaec50a159dd52f355ea6
+size 8653
```
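The added `tfevents` file holds the TensorBoard scalars for this training run. A short sketch for reading it offline, assuming the `tensorboard` package is installed; the exact scalar tag names are assumptions until the file is inspected.

```python
# Read the event file added under runs/; the directory comes from the commit,
# but the exact scalar tag names (e.g. "eval/loss") are assumptions.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Sep11_13-47-01_algo-1")
acc.Reload()                  # parse the tfevents file
print(acc.Tags()["scalars"])  # list the scalar tags logged during training
```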
training_args.bin CHANGED
```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e6d1f97830e56883df7406ca18eeaad7ea671656e1b4b2a9779d5a8f9583328f
+oid sha256:8eddac8862e172dfae3bb1b1b82346b3e27c46a1a2e1d371d77f227684fc9153
 size 5240
```
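`training_args.bin` is the pickled `TrainingArguments` that the `Trainer` writes next to its outputs; it changes here because the batch-size settings above changed. One way to inspect it, noting that it is a full pickle and should only be loaded from a trusted source:

```python
# Inspect the serialized TrainingArguments; weights_only=False is required
# because the file is a pickle, so only load it from a source you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.gradient_accumulation_steps)  # expected: 4 after this commit
print(args.lr_scheduler_type)            # expected: cosine, per the model card
```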