binhquoc committed
Commit 87ff205 (1 parent: 6782dbb)

Upload model

Files changed (3):
  1. README.md +6 -6
  2. adapter_config.json +3 -3
  3. adapter_model.bin +2 -2
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  library_name: peft
- base_model: daryl149/llama-2-7b-chat-hf
+ base_model: daryl149/llama-2-13b-chat-hf
  ---

  # Model Card for Model ID
@@ -204,15 +204,15 @@ Carbon emissions can be estimated using the [Machine Learning Impact calculator]

  The following `bitsandbytes` quantization config was used during training:
  - quant_method: bitsandbytes
- - load_in_8bit: True
- - load_in_4bit: False
+ - load_in_8bit: False
+ - load_in_4bit: True
  - llm_int8_threshold: 6.0
  - llm_int8_skip_modules: None
  - llm_int8_enable_fp32_cpu_offload: False
  - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: fp4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float32
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16

  ### Framework versions
 
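For context, the updated values describe a QLoRA-style 4-bit setup (NF4 quantization, double quantization, bfloat16 compute) on the new 13B base model. Below is a minimal sketch of loading the base model with an equivalent `BitsAndBytesConfig`; only the config values and the base-model id come from this diff, while the rest (device placement, tokenizer handling) is an assumption:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 4-bit NF4 quantization with double quantization and bfloat16 compute,
# mirroring the updated quantization config in the model card above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# New base model from this commit (previously the 7B variant).
base_model_id = "daryl149/llama-2-13b-chat-hf"

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto",  # assumption: not specified in the diff
)
tokenizer = AutoTokenizer.from_pretrained(base_model_id)
```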
adapter_config.json CHANGED
@@ -1,7 +1,7 @@
  {
    "alpha_pattern": {},
    "auto_mapping": null,
-   "base_model_name_or_path": "daryl149/llama-2-7b-chat-hf",
+   "base_model_name_or_path": "daryl149/llama-2-13b-chat-hf",
    "bias": "none",
    "fan_in_fan_out": false,
    "inference_mode": true,
@@ -16,8 +16,8 @@
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
-     "v_proj",
-     "q_proj"
+     "q_proj",
+     "v_proj"
    ],
    "task_type": "CAUSAL_LM"
  }
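The adapter still applies LoRA only to the attention projections `q_proj` and `v_proj` (the reordering above has no functional effect) with `task_type` CAUSAL_LM. A minimal sketch of attaching the uploaded adapter to the quantized base model from the previous snippet; the adapter repo id is a hypothetical placeholder, since the repository name is not visible in this commit view:

```python
from peft import PeftModel

# Attach the uploaded LoRA adapter (q_proj / v_proj, CAUSAL_LM) to the
# 4-bit quantized 13B base model loaded above.
ADAPTER_REPO = "binhquoc/<adapter-repo>"  # hypothetical placeholder, not from the diff
model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)
model.eval()
```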
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5137fa5845d6b4e1fa75904656e0e1d392aeb1da3511ca329e3be50883343f9f
- size 16822989
+ oid sha256:e29766bb33c7d2a5fdb47ece13962d9cf4d20a4a9ac34a46b6968c084bfc9eee
+ size 26271757