barissglc committed
Commit c4dee81
1 Parent(s): aa20a51

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. meta-llama/checkpoint-100/README.md +32 -0
  2. meta-llama/checkpoint-100/adapter_config.json +21 -0
  3. meta-llama/checkpoint-100/adapter_model.bin +3 -0
  4. meta-llama/checkpoint-100/optimizer.pt +3 -0
  5. meta-llama/checkpoint-100/rng_state.pth +3 -0
  6. meta-llama/checkpoint-100/scheduler.pt +3 -0
  7. meta-llama/checkpoint-100/special_tokens_map.json +24 -0
  8. meta-llama/checkpoint-100/tokenizer.json +0 -0
  9. meta-llama/checkpoint-100/tokenizer_config.json +33 -0
  10. meta-llama/checkpoint-100/trainer_state.json +40 -0
  11. meta-llama/checkpoint-100/training_args.bin +3 -0
  12. meta-llama/checkpoint-125/README.md +32 -0
  13. meta-llama/checkpoint-125/adapter_config.json +21 -0
  14. meta-llama/checkpoint-125/adapter_model.bin +3 -0
  15. meta-llama/checkpoint-125/optimizer.pt +3 -0
  16. meta-llama/checkpoint-125/rng_state.pth +3 -0
  17. meta-llama/checkpoint-125/scheduler.pt +3 -0
  18. meta-llama/checkpoint-125/special_tokens_map.json +24 -0
  19. meta-llama/checkpoint-125/tokenizer.json +0 -0
  20. meta-llama/checkpoint-125/tokenizer_config.json +33 -0
  21. meta-llama/checkpoint-125/trainer_state.json +46 -0
  22. meta-llama/checkpoint-125/training_args.bin +3 -0
  23. meta-llama/checkpoint-150/README.md +32 -0
  24. meta-llama/checkpoint-150/adapter_config.json +21 -0
  25. meta-llama/checkpoint-150/adapter_model.bin +3 -0
  26. meta-llama/checkpoint-150/optimizer.pt +3 -0
  27. meta-llama/checkpoint-150/rng_state.pth +3 -0
  28. meta-llama/checkpoint-150/scheduler.pt +3 -0
  29. meta-llama/checkpoint-150/special_tokens_map.json +24 -0
  30. meta-llama/checkpoint-150/tokenizer.json +0 -0
  31. meta-llama/checkpoint-150/tokenizer_config.json +33 -0
  32. meta-llama/checkpoint-150/trainer_state.json +52 -0
  33. meta-llama/checkpoint-150/training_args.bin +3 -0
  34. meta-llama/checkpoint-175/README.md +32 -0
  35. meta-llama/checkpoint-175/adapter_config.json +21 -0
  36. meta-llama/checkpoint-175/adapter_model.bin +3 -0
  37. meta-llama/checkpoint-175/optimizer.pt +3 -0
  38. meta-llama/checkpoint-175/rng_state.pth +3 -0
  39. meta-llama/checkpoint-175/scheduler.pt +3 -0
  40. meta-llama/checkpoint-175/special_tokens_map.json +24 -0
  41. meta-llama/checkpoint-175/tokenizer.json +0 -0
  42. meta-llama/checkpoint-175/tokenizer_config.json +33 -0
  43. meta-llama/checkpoint-175/trainer_state.json +58 -0
  44. meta-llama/checkpoint-175/training_args.bin +3 -0
  45. meta-llama/checkpoint-200/README.md +32 -0
  46. meta-llama/checkpoint-200/adapter_config.json +21 -0
  47. meta-llama/checkpoint-200/adapter_model.bin +3 -0
  48. meta-llama/checkpoint-200/optimizer.pt +3 -0
  49. meta-llama/checkpoint-200/rng_state.pth +3 -0
  50. meta-llama/checkpoint-200/scheduler.pt +3 -0
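The commit message above ("Upload folder using huggingface_hub") is the default message produced when a local folder is pushed with the `huggingface_hub` client. A minimal sketch of that kind of call follows; the `repo_id` is a placeholder, since the target repository is not shown in this diff.

```python
# Sketch (assumption, not part of the commit): pushing the local folder of
# checkpoints with huggingface_hub's upload_folder, which yields the default
# commit message "Upload folder using huggingface_hub".
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="meta-llama",     # local folder holding the checkpoint-* directories
    repo_id="<username>/<repo>",  # placeholder: actual repo id not visible in this diff
    repo_type="model",
)
```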
meta-llama/checkpoint-100/README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.4.0
+
+ - PEFT 0.4.0
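The README above records the 4-bit `bitsandbytes` quantization config used during training. A minimal sketch of recreating that config, assuming `transformers` and `bitsandbytes` are installed; the base model name is taken from adapter_config.json below.

```python
# Sketch (assumption): the bitsandbytes config described in the checkpoint README.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_8bit: False, load_in_4bit: True
    bnb_4bit_quant_type="nf4",             # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=False,       # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.float16,  # bnb_4bit_compute_dtype: float16
)

base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-hf",           # base model named in adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
)
```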
meta-llama/checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
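This adapter_config.json describes a LoRA adapter (r=64, alpha=16, dropout=0.1) on the q_proj/v_proj projections of Llama-2-13b. A minimal sketch of the matching `peft` setup and of attaching the saved adapter; `base_model` is the quantized model from the previous sketch, and the local path is a placeholder for a clone of this repo.

```python
# Sketch (assumption): LoRA config equivalent to checkpoint-100/adapter_config.json,
# plus loading the saved adapter weights onto the base model.
from peft import LoraConfig, PeftModel

lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)

# base_model: the 4-bit Llama-2-13b loaded in the previous sketch.
model = PeftModel.from_pretrained(base_model, "meta-llama/checkpoint-100")
```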
meta-llama/checkpoint-100/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bbe9feb9459905e1d4ba4736a2b55823d6df8d5cccfb865efd8eedd463f17eb
+ size 209773322
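The `.bin`/`.pt`/`.pth` entries in this commit are Git LFS pointer files: the diff stores only the `oid sha256` and `size`, while the binary payload is fetched separately. A minimal sketch of checking a downloaded file against the recorded oid; the local path is a placeholder.

```python
# Sketch (assumption): verify a downloaded LFS object against the sha256 oid
# recorded in the pointer file shown above.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "6bbe9feb9459905e1d4ba4736a2b55823d6df8d5cccfb865efd8eedd463f17eb"
assert sha256_of("meta-llama/checkpoint-100/adapter_model.bin") == expected
```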
meta-llama/checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70a419beb4e614b8c651f50ed7a8f26331b5034dcc6762bf0054c7afcea9d81e
+ size 419529594
meta-llama/checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87ac14f3fba2f8dfb3505a8929a9445f622a72695503379751997f3c8b8a44e5
+ size 14244
meta-llama/checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81007ec48272bbdc4f9622c046f9c026bf8120ed11d1398fd97bb5168a6f3dda
+ size 1064
meta-llama/checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
meta-llama/checkpoint-100/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
meta-llama/checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
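The tokenizer files saved with each checkpoint (tokenizer.json, tokenizer_config.json, special_tokens_map.json) can be loaded directly. A minimal sketch, assuming the repo is cloned locally; note that tokenizer_config.json leaves `pad_token` null while special_tokens_map.json maps it to `</s>`.

```python
# Sketch (assumption): loading the tokenizer stored alongside a checkpoint.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/checkpoint-100")
tokenizer.pad_token = tokenizer.eos_token  # mirror special_tokens_map.json ("pad_token": "</s>")
```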
meta-llama/checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.3255813953488373,
+ "global_step": 100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 1.1968,
+ "step": 25
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.0002,
+ "loss": 1.062,
+ "step": 50
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0002,
+ "loss": 0.9718,
+ "step": 75
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 0.0002,
+ "loss": 0.9553,
+ "step": 100
+ }
+ ],
+ "max_steps": 430,
+ "num_train_epochs": 10,
+ "total_flos": 4479556048896000.0,
+ "trial_name": null,
+ "trial_params": null
+ }
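Each checkpoint's trainer_state.json carries the training loss history up to that step (here, loss falling from 1.1968 at step 25 to 0.9553 at step 100 out of 430 max steps). A minimal sketch of reading that history, assuming a local clone of the repo.

```python
# Sketch (assumption): print the loss curve recorded in a checkpoint's trainer_state.json.
import json

with open("meta-llama/checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  loss {entry['loss']:.4f}")
# step  25  epoch 0.58  loss 1.1968
# ...
# step 100  epoch 2.33  loss 0.9553
```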
meta-llama/checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b11ec1759aa0c0a5f5c51c199bc92dd49871ce61b982074bb38eda12e020d993
+ size 4408
meta-llama/checkpoint-125/README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.4.0
+
+ - PEFT 0.4.0
meta-llama/checkpoint-125/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
meta-llama/checkpoint-125/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8bb4247f3753dc319cc07e9e8049efb3c97cc563746502635e61989929bc41ba
+ size 209773322
meta-llama/checkpoint-125/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09cb8a46483abd0648bce0bea267b7a6faef64bfc8e509df68bc58fcb87e94fd
+ size 419529594
meta-llama/checkpoint-125/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cf0c75fee6591b624e3e6dd729f2dcdf3f7853d594a3ba1ef0406236475273d
+ size 14244
meta-llama/checkpoint-125/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efdbfe8676cd24a75fed7e6e38125bb1bb838a4b70c0fbf7469557659d9b1fec
+ size 1064
meta-llama/checkpoint-125/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
meta-llama/checkpoint-125/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
meta-llama/checkpoint-125/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
meta-llama/checkpoint-125/trainer_state.json ADDED
@@ -0,0 +1,46 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.9069767441860463,
+ "global_step": 125,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 1.1968,
+ "step": 25
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.0002,
+ "loss": 1.062,
+ "step": 50
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0002,
+ "loss": 0.9718,
+ "step": 75
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 0.0002,
+ "loss": 0.9553,
+ "step": 100
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 0.0002,
+ "loss": 0.9116,
+ "step": 125
+ }
+ ],
+ "max_steps": 430,
+ "num_train_epochs": 10,
+ "total_flos": 5489148786524160.0,
+ "trial_name": null,
+ "trial_params": null
+ }
meta-llama/checkpoint-125/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b11ec1759aa0c0a5f5c51c199bc92dd49871ce61b982074bb38eda12e020d993
+ size 4408
meta-llama/checkpoint-150/README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.4.0
+
+ - PEFT 0.4.0
meta-llama/checkpoint-150/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
meta-llama/checkpoint-150/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e72a2b3b4a2cdbd4b202366eb39dd54635941c86a76fe48371d30f25814b0790
+ size 209773322
meta-llama/checkpoint-150/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16c245e3659f4d3a4ef03970d18f6ace9c9783ccd800ff27055d9b416c0ea23f
+ size 419529594
meta-llama/checkpoint-150/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8aaf18b1cd354830303b27d23b73d23f40903a67c6736ed3101c1d30152ec241
+ size 14244
meta-llama/checkpoint-150/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d31dc31a119769737d72f3df4c8cdf99522596cafc12bf2eea05a4ff374f599c
+ size 1064
meta-llama/checkpoint-150/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
meta-llama/checkpoint-150/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
meta-llama/checkpoint-150/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
meta-llama/checkpoint-150/trainer_state.json ADDED
@@ -0,0 +1,52 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 3.488372093023256,
+ "global_step": 150,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 1.1968,
+ "step": 25
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.0002,
+ "loss": 1.062,
+ "step": 50
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0002,
+ "loss": 0.9718,
+ "step": 75
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 0.0002,
+ "loss": 0.9553,
+ "step": 100
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 0.0002,
+ "loss": 0.9116,
+ "step": 125
+ },
+ {
+ "epoch": 3.49,
+ "learning_rate": 0.0002,
+ "loss": 0.7707,
+ "step": 150
+ }
+ ],
+ "max_steps": 430,
+ "num_train_epochs": 10,
+ "total_flos": 6575736353955840.0,
+ "trial_name": null,
+ "trial_params": null
+ }
meta-llama/checkpoint-150/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b11ec1759aa0c0a5f5c51c199bc92dd49871ce61b982074bb38eda12e020d993
+ size 4408
meta-llama/checkpoint-175/README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.4.0
+
+ - PEFT 0.4.0
meta-llama/checkpoint-175/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
meta-llama/checkpoint-175/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d2e64e31eba4011586cfe2b5f78537255f5247ebbd3b21a3589e6d107659c63
+ size 209773322
meta-llama/checkpoint-175/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3301fd7e29ceb2d01512a3d84171d1b83b451e0a3aefe53ce1afdf7be9ce1aa4
+ size 419529594
meta-llama/checkpoint-175/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e303335faa55ba3e1a9e5331a5eb3fbabdc3edcecc6d93685ff70b78753dc8c
+ size 14244
meta-llama/checkpoint-175/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c17c000dd3ad474c8365253b4464489310fed0d13c745556138280174b380deb
+ size 1064
meta-llama/checkpoint-175/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
meta-llama/checkpoint-175/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
meta-llama/checkpoint-175/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
meta-llama/checkpoint-175/trainer_state.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.069767441860465,
+ "global_step": 175,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.58,
+ "learning_rate": 0.0002,
+ "loss": 1.1968,
+ "step": 25
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 0.0002,
+ "loss": 1.062,
+ "step": 50
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 0.0002,
+ "loss": 0.9718,
+ "step": 75
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 0.0002,
+ "loss": 0.9553,
+ "step": 100
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 0.0002,
+ "loss": 0.9116,
+ "step": 125
+ },
+ {
+ "epoch": 3.49,
+ "learning_rate": 0.0002,
+ "loss": 0.7707,
+ "step": 150
+ },
+ {
+ "epoch": 4.07,
+ "learning_rate": 0.0002,
+ "loss": 0.7219,
+ "step": 175
+ }
+ ],
+ "max_steps": 430,
+ "num_train_epochs": 10,
+ "total_flos": 7749710691409920.0,
+ "trial_name": null,
+ "trial_params": null
+ }
meta-llama/checkpoint-175/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b11ec1759aa0c0a5f5c51c199bc92dd49871ce61b982074bb38eda12e020d993
+ size 4408
meta-llama/checkpoint-200/README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.4.0
+
+ - PEFT 0.4.0
meta-llama/checkpoint-200/adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "meta-llama/Llama-2-13b-hf",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 16,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
meta-llama/checkpoint-200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:266d6fb26200777f133bdda94742951f6b8788f8aa7c0b2dfd0d83634f6b8b19
+ size 209773322
meta-llama/checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b80a3c2cb442707f6933e7dc748fc33c9058b2e8cb2d492645c3a778b34e098
+ size 419529594
meta-llama/checkpoint-200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1905582d93ffc2ba4b4b3e6bbd53985af238503fc9b18c6885eec1c421fb69cd
+ size 14244
meta-llama/checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:859ff0676471245c9481ca25d6d6778d1c7963c39b7877af46bb8ca30a9ead21
+ size 1064