WangXFng committed
Commit
534a162
1 Parent(s): 6081ed4

Model save

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
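The new rule routes tokenizer.json through Git LFS, matching the patterns above it. A minimal sketch for verifying the attribute in a local checkout (assuming git and git-lfs are installed), using Python's subprocess to call git check-attr:

import subprocess

# Ask git which filter applies to tokenizer.json; with the rule above in
# .gitattributes this should print "tokenizer.json: filter: lfs".
result = subprocess.run(
    ["git", "check-attr", "filter", "tokenizer.json"],
    capture_output=True, text=True, check=True,
)
print(result.stdout.strip())
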
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-base_model: meta-llama/Llama-3.2-3B-Instruct
+base_model: meta-llama/Llama-3.2-1B-Instruct
 library_name: peft
 license: llama3.2
 tags:
@@ -14,7 +14,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # lora
 
-This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) on an unknown dataset.
 
 ## Model description
 
@@ -42,11 +42,16 @@ The following hyperparameters were used during training:
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_steps: 2
-- num_epochs: 4
+- num_epochs: 2
+
+### Training results
+
+
 
 ### Framework versions
 
 - PEFT 0.13.0
-- Transformers 4.43.4
+- Transformers 4.45.2
 - Pytorch 2.4.0
-- Tokenizers 0.19.1
+- Datasets 2.21.0
+- Tokenizers 0.20.0
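These README hyperparameters map directly onto transformers' TrainingArguments. A minimal sketch under stated assumptions: the peak learning rate of 1e-4 is inferred from the linear-decay values logged in trainer_state.json below (each logged learning_rate equals 1e-4 x (1030 - step) / (1030 - 2)), the batch size and logging/save steps come from the same file, and the output directory name is hypothetical:

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="lora",                # hypothetical output directory
    num_train_epochs=2,               # README: num_epochs: 2
    per_device_train_batch_size=16,   # trainer_state.json: train_batch_size
    learning_rate=1e-4,               # assumption implied by the logged decay
    lr_scheduler_type="linear",       # README: lr_scheduler_type
    warmup_steps=2,                   # README: lr_scheduler_warmup_steps
    adam_beta1=0.9,                   # README: Adam betas=(0.9,0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,                # README: epsilon=1e-08
    logging_steps=250,                # trainer_state.json: logging_steps
    save_steps=500,                   # trainer_state.json: save_steps
)
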
adapter_config.json CHANGED
@@ -4,7 +4,7 @@
     "base_model_class": "LlamaForCausalLM",
     "parent_library": "transformers.models.llama.modeling_llama"
   },
-  "base_model_name_or_path": "meta-llama/Llama-3.2-3B-Instruct",
+  "base_model_name_or_path": "meta-llama/Llama-3.2-1B-Instruct",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -23,13 +23,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "q_proj",
     "k_proj",
+    "v_proj",
     "o_proj",
-    "down_proj",
-    "up_proj",
     "gate_proj",
-    "v_proj",
-    "q_proj"
+    "down_proj",
+    "up_proj"
   ],
   "task_type": null,
   "use_dora": true,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f60d803e465c15a3b1329a1a39c05400aaf1f0b148f411bc29d7b9ad1b2794cc
-size 1639093264
+oid sha256:87c6245eb4daaee1c4c5b51cba6948ff67053ebab3759dc1a01a4ca4fc4dc776
+size 1082290000
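The adapter file shrinks from ~1.6 GB to ~1.1 GB, consistent with the 3B-to-1B base-model swap. A minimal sketch for inspecting the committed weights locally, assuming the LFS file has been pulled:

from safetensors import safe_open

# Print the first few tensor names and shapes; the shapes should now
# match the hidden dimensions of the 1B base model.
with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in sorted(f.keys())[:5]:
        print(name, tuple(f.get_slice(name).get_shape()))
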
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
trainer_state.json CHANGED
@@ -1,17 +1,55 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": null,
+  "epoch": 2.0,
   "eval_steps": 500,
-  "global_step": 0,
+  "global_step": 1030,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
-  "log_history": [],
-  "logging_steps": 500,
-  "max_steps": 0,
+  "log_history": [
+    {
+      "epoch": 0.4854368932038835,
+      "grad_norm": 1.80476975440979,
+      "learning_rate": 7.587548638132295e-05,
+      "loss": 0.6951,
+      "step": 250
+    },
+    {
+      "epoch": 0.970873786407767,
+      "grad_norm": 1.3268828392028809,
+      "learning_rate": 5.155642023346303e-05,
+      "loss": 0.3819,
+      "step": 500
+    },
+    {
+      "epoch": 1.4563106796116505,
+      "grad_norm": 1.490614414215088,
+      "learning_rate": 2.7237354085603113e-05,
+      "loss": 0.3553,
+      "step": 750
+    },
+    {
+      "epoch": 1.941747572815534,
+      "grad_norm": 1.4715696573257446,
+      "learning_rate": 2.9182879377431908e-06,
+      "loss": 0.3348,
+      "step": 1000
+    },
+    {
+      "epoch": 2.0,
+      "step": 1030,
+      "total_flos": 2.5008097487007744e+17,
+      "train_loss": 0.43835841891835037,
+      "train_runtime": 12656.9656,
+      "train_samples_per_second": 20.832,
+      "train_steps_per_second": 0.081
+    }
+  ],
+  "logging_steps": 250,
+  "max_steps": 1030,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 0,
+  "num_train_epochs": 2,
   "save_steps": 500,
   "stateful_callbacks": {
     "TrainerControl": {
@@ -25,8 +63,8 @@
       "attributes": {}
     }
   },
-  "total_flos": 0,
-  "train_batch_size": null,
+  "total_flos": 2.5008097487007744e+17,
+  "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
 }
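The populated log_history shows the training loss falling from 0.6951 to 0.3348 over 1030 steps. A minimal sketch that replays the logged curve from a local copy of this file:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry "loss"; the final summary entry carries
# "train_loss" instead and is skipped here.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>4}: loss={entry['loss']:.4f} "
              f"lr={entry['learning_rate']:.3e}")
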
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:433a5ba8b7a30a164725af7d6b34b7b3888f93ad48028cccf8be71dc4fe79bbf
+oid sha256:b8e0dae5103974b746d2fdd4495fd0e5453b289bdb72e5f359295701cf7eb031
 size 5176