matthewchung74 committed
Commit 68a6a6b
1 Parent(s): 6c6f774

Upload folder using huggingface_hub

Files changed (2):
  1. README.md +19 -0
  2. adapter_config.json +6 -6
README.md CHANGED
@@ -216,4 +216,23 @@ The following `bitsandbytes` quantization config was used during training:
 ### Framework versions
 
 
+- PEFT 0.7.0.dev0
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
+### Framework versions
+
+
 - PEFT 0.7.0.dev0
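The added `## Training procedure` section records a 4-bit NF4 quantization setup. As a point of reference, these flags map one-to-one onto the `transformers` `BitsAndBytesConfig` class (the `quant_method: bitsandbytes` entry is implied by using that class rather than passed explicitly). A minimal sketch of recreating the config, with a placeholder base-model ID since the commit does not name one:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the quantization flags listed in the README diff above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",            # 4-bit NormalFloat quantization
    bnb_4bit_use_double_quant=True,       # also quantize the quantization constants
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base_model_id = "..."  # placeholder: the base model is not named in this commit
model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto",
)
```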
adapter_config.json CHANGED
@@ -16,14 +16,14 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "up_proj",
-    "lm_head",
+    "gate_proj",
+    "down_proj",
+    "k_proj",
     "o_proj",
+    "lm_head",
     "q_proj",
-    "v_proj",
-    "k_proj",
-    "gate_proj",
-    "down_proj"
+    "up_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
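The `adapter_config.json` change is purely a reordering: the set of LoRA `target_modules` is identical before and after (note that `lm_head` is adapted alongside the attention and MLP projections); only the JSON serialization order differs. A minimal sketch of the corresponding PEFT `LoraConfig`, with placeholder rank and alpha values since this hunk does not show them:

```python
from peft import LoraConfig

# Sketch of the adapter config implied by the diff; r and lora_alpha are
# placeholders, as this hunk only shows the target_modules section.
lora_config = LoraConfig(
    r=16,           # placeholder; not shown in this diff
    lora_alpha=32,  # placeholder; not shown in this diff
    target_modules=[
        "gate_proj", "down_proj", "k_proj", "o_proj",
        "lm_head", "q_proj", "up_proj", "v_proj",
    ],
    task_type="CAUSAL_LM",
)
```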