Files changed (3)
  1. README.md +1 -5
  2. adapter_config.json +4 -4
  3. adapter_model.bin +2 -2
README.md CHANGED
@@ -1,9 +1,5 @@
  ---
  library_name: peft
- license: apache-2.0
- datasets:
- - tatsu-lab/alpaca
- pipeline_tag: text2text-generation
  ---
  ## Training procedure
 
@@ -21,4 +17,4 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions
 
 
- - PEFT 0.5.0.dev0
+ - PEFT 0.5.0.dev0
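
The second hunk header references the `bitsandbytes` quantization config recorded in the unchanged part of the README; its exact values are not visible in this diff. A minimal sketch of loading the base model under such a 4-bit config, with every parameter value an assumption:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Hypothetical 4-bit setup; the actual values sit in README lines not shown in this diff.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=False,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Llama-2-7b-chat-hf",
    quantization_config=bnb_config,
)
```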
adapter_config.json CHANGED
@@ -1,17 +1,17 @@
  {
  "auto_mapping": null,
- "base_model_name_or_path": "NousResearch/Llama-2-7b-hf",
+ "base_model_name_or_path": "NousResearch/Llama-2-7b-chat-hf",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
- "lora_alpha": 16,
- "lora_dropout": 0.1,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
- "r": 64,
+ "r": 16,
  "revision": null,
  "target_modules": [
  "q_proj",
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2dc57b0303e62c58c45bc1c2d9925f1614ca807950d3ab0318bf4c98ed00877b
- size 134263757
+ oid sha256:be33b1f760ec6fa03b09d7de495086f0882faa740c7585eabfaecf8eb64c1232
+ size 33600461
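
The new LFS pointer records an adapter of roughly 34 MB, down from roughly 134 MB, consistent with the rank change above: LoRA parameter count scales linearly with `r`, and 64 → 16 is a 4× reduction. A loading sketch, assuming a placeholder repo id for the adapter:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "NousResearch/Llama-2-7b-chat-hf"
adapter_id = "your-username/your-adapter-repo"  # placeholder: the real repo id is not shown in this diff

base_model = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Attach the LoRA weights stored in adapter_model.bin / adapter_config.json
model = PeftModel.from_pretrained(base_model, adapter_id)
```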