talazz committed
Commit 091bcd8
1 parent: 1a8d06a

your-username/llama2_finetuned

README.md CHANGED
@@ -4,7 +4,7 @@ tags:
 - trl
 - sft
 - generated_from_trainer
-base_model: ybelkada/falcon-7b-sharded-bf16
+base_model: TinyPixel/Llama-2-7B-bf16-sharded
 model-index:
 - name: results
   results: []
@@ -13,10 +13,12 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->

-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/team2001/falcon/runs/g89n84q2)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/team2001/llama/runs/2it381ub)
 # results

-This model is a fine-tuned version of [ybelkada/falcon-7b-sharded-bf16](https://huggingface.co/ybelkada/falcon-7b-sharded-bf16) on an unknown dataset.
+This model is a fine-tuned version of [TinyPixel/Llama-2-7B-bf16-sharded](https://huggingface.co/TinyPixel/Llama-2-7B-bf16-sharded) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.5778

 ## Model description

@@ -35,20 +37,20 @@ More information needed
 ### Training hyperparameters

 The following hyperparameters were used during training:
-- learning_rate: 5e-05
+- learning_rate: 0.0004
 - train_batch_size: 4
-- eval_batch_size: 8
+- eval_batch_size: 4
 - seed: 42
-- gradient_accumulation_steps: 4
-- total_train_batch_size: 16
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: constant
-- lr_scheduler_warmup_ratio: 0.03
+- lr_scheduler_warmup_steps: 100
 - num_epochs: 1
-- mixed_precision_training: Native AMP

 ### Training results

+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:-----:|:----:|:---------------:|
+| 0.3877        | 1.0   | 4174 | 0.5778          |

 ### Framework versions
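
Read together, the new hyperparameters map one-to-one onto a `TrainingArguments` object. At 4174 steps for one epoch with a train batch size of 4, the run saw roughly 16.7k examples, assuming a single device and no gradient accumulation (the old `gradient_accumulation_steps: 4` line was dropped). A minimal sketch of the corresponding configuration follows; `output_dir` and `report_to` are assumptions, the remaining values come from the card:

```python
# Sketch of a TrainingArguments matching the card's hyperparameters.
# output_dir and report_to are assumptions; the rest is from the README diff.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="results",            # assumed from the model-index name "results"
    learning_rate=4e-4,              # 0.0004 (was 5e-05 in the Falcon run)
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,    # was 8 before
    seed=42,
    adam_beta1=0.9,                  # Adam with betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="constant",
    warmup_steps=100,                # replaces the old warmup_ratio=0.03
    num_train_epochs=1,
    report_to="wandb",               # assumed: the card links a W&B run
)
```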
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "ybelkada/falcon-7b-sharded-bf16",
+  "base_model_name_or_path": "TinyPixel/Llama-2-7B-bf16-sharded",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -20,10 +20,14 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "dense",
-    "dense_4h_to_h",
-    "dense_h_to_4h",
-    "query_key_value"
+    "o_proj",
+    "gate_proj",
+    "down_proj",
+    "up_proj",
+    "k_proj",
+    "q_proj",
+    "v_proj",
+    "lm_head"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c199d8563d2475ada86114386192ba9d6addf3af7a9bd49d15215bac0cf7452
-size 130583912
+oid sha256:298801c2a7f16cd43893e849890d15da9c3236f7553ed86b58d1775024830910
+size 686566992
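
The adapter weights grow from roughly 131 MB to 687 MB, plausibly because LoRA now touches seven projection matrices plus `lm_head` rather than four fused Falcon layers (a higher rank could also contribute; it is not visible in this commit). Attaching the adapter is the usual two-step PEFT load — a sketch, using the placeholder repo id from the page header:

```python
# Sketch: load the base model, then attach this LoRA adapter with PEFT.
# "your-username/llama2_finetuned" is the placeholder id from the page header.
# device_map="auto" assumes the accelerate package is installed.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "TinyPixel/Llama-2-7B-bf16-sharded",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "your-username/llama2_finetuned")
tokenizer = AutoTokenizer.from_pretrained("your-username/llama2_finetuned")
```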
special_tokens_map.json CHANGED
@@ -1,23 +1,24 @@
 {
-  "additional_special_tokens": [
-    ">>TITLE<<",
-    ">>ABSTRACT<<",
-    ">>INTRODUCTION<<",
-    ">>SUMMARY<<",
-    ">>COMMENT<<",
-    ">>ANSWER<<",
-    ">>QUESTION<<",
-    ">>DOMAIN<<",
-    ">>PREFIX<<",
-    ">>SUFFIX<<",
-    ">>MIDDLE<<"
-  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
   "eos_token": {
-    "content": "<|endoftext|>",
+    "content": "</s>",
     "lstrip": false,
-    "normalized": false,
+    "normalized": true,
     "rstrip": false,
     "single_word": false
   },
-  "pad_token": "<|endoftext|>"
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
 }
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
-size 493443
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json CHANGED
@@ -1,119 +1,39 @@
 {
-  "add_prefix_space": false,
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
-      "content": ">>TITLE<<",
+      "content": "<unk>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "1": {
-      "content": ">>ABSTRACT<<",
+      "content": "<s>",
       "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
     "2": {
-      "content": ">>INTRODUCTION<<",
+      "content": "</s>",
       "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "3": {
-      "content": ">>SUMMARY<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "4": {
-      "content": ">>COMMENT<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "5": {
-      "content": ">>ANSWER<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "6": {
-      "content": ">>QUESTION<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "7": {
-      "content": ">>DOMAIN<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "8": {
-      "content": ">>PREFIX<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "9": {
-      "content": ">>SUFFIX<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "10": {
-      "content": ">>MIDDLE<<",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "11": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
+      "normalized": true,
       "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "additional_special_tokens": [
-    ">>TITLE<<",
-    ">>ABSTRACT<<",
-    ">>INTRODUCTION<<",
-    ">>SUMMARY<<",
-    ">>COMMENT<<",
-    ">>ANSWER<<",
-    ">>QUESTION<<",
-    ">>DOMAIN<<",
-    ">>PREFIX<<",
-    ">>SUFFIX<<",
-    ">>MIDDLE<<"
-  ],
-  "clean_up_tokenization_spaces": true,
-  "eos_token": "<|endoftext|>",
-  "model_max_length": 2048,
-  "pad_token": "<|endoftext|>",
-  "tokenizer_class": "PreTrainedTokenizerFast"
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
 }
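
Together with special_tokens_map.json and the new tokenizer.model, this swaps Falcon's BPE `PreTrainedTokenizerFast` (with its `>>TITLE<<`-style control tokens) for Llama's SentencePiece `LlamaTokenizer` with `<s>`/`</s>`/`<unk>`; `pad_token` is mapped to `</s>`, the usual workaround since Llama-2 ships no pad token. A small sanity-check sketch, again assuming the placeholder repo id from the header:

```python
# Sanity-check sketch for the new Llama tokenizer settings.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-username/llama2_finetuned")
print(type(tok).__name__)   # expect a Llama tokenizer class
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)  # <s> </s> <unk> </s>
print(len(tok))             # Llama-2 vocab size: 32000

# add_bos_token=true / add_eos_token=false per the new config:
ids = tok("hello").input_ids
print(ids[0] == tok.bos_token_id)   # True: BOS is prepended
print(ids[-1] == tok.eos_token_id)  # False: no EOS appended
```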
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1311408a6959dd88543c60097d041cb07fac4a54a00bc9e684287e00039940d3
-size 5112
+oid sha256:081d01a8b69f8dfc82774bb9978d2baff39d267842d3d394cf2223e9e630d3ab
+size 5048
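
training_args.bin is the pickled `TrainingArguments` object that `Trainer` saves alongside a run, so the small size change just reflects the new argument values. It can be inspected directly — a sketch, assuming `transformers` is importable so the object can be unpickled:

```python
# Sketch: inspect the pickled TrainingArguments saved by Trainer.
# weights_only=False is required on recent torch versions because this is
# a full pickled Python object, not a tensor file.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size)  # e.g. 0.0004, 4
print(args.lr_scheduler_type, args.warmup_steps)             # e.g. constant, 100
```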