xianchaowu committed
Commit
47787a3
1 Parent(s): f05e11e

upload lazy lora for llama1-33b

Files changed (4)
  1. README.md +157 -0
  2. adapter_config.json +455 -0
  3. adapter_model.bin +3 -0
  4. usage.py +51 -0
README.md CHANGED
@@ -1,3 +1,160 @@
  ---
  license: llama2
  ---
+
+ ## Lazy LoRA
+
+ ### Benefits
+
+ 0. Uses [Meta's LLaMA-1 models](https://huggingface.co/huggyllama/llama-30b) (the earlier release, since no 33B model is publicly included in LLaMA-2).
+ 1. Supports [4-bit QLoRA](https://arxiv.org/abs/2305.14314), for large savings in GPU memory and inference time.
+ 2. Comparable results on the MMLU evaluation dataset (slightly worse, mainly due to 4-bit quantization): 56.97% versus LLaMA1-33B's 57.8% (-0.83%).
+ 3. This lazy-lora adapter is based on [Meta's LLaMA-1](https://huggingface.co/huggyllama/llama-30b) and trained on the [oasst1 dataset](https://huggingface.co/datasets/OpenAssistant/oasst1), following [Guanaco](https://huggingface.co/timdettmers/guanaco-65b).
+
+ ### Introduction
+ The rank of each LoRA layer is determined from the singular values of the corresponding pretrained weight matrix (a rough sketch of this idea appears at the end of this section).
+ It also combines:
+ 1. LoRA: [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685)
+ 2. Prefix Tuning: [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://aclanthology.org/2021.acl-long.353/), [P-Tuning v2: Prompt Tuning Can Be Comparable to Fine-tuning Universally Across Scales and Tasks](https://arxiv.org/pdf/2110.07602.pdf)
+ 3. Prompt Tuning: [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691)
+ 4. LLaMA adapter: [LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention](https://arxiv.org/abs/2303.16199)
+ in one model.
+
+ This allows you to perform LoRA (additional low-rank adapters inserted into each linear layer) and prompt learning (additional virtual tokens attached to the input and to the attention layers, acting as `past_key_values`).
+
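+ As a rough illustration only (not the actual lazy-lora implementation; the function name `allocate_ranks_by_svd` and the rank-budgeting rule below are assumptions for this sketch), per-module ranks can be derived from the singular-value spectrum of each pretrained weight matrix:
+
+ ```python
+ import torch
+
+ def allocate_ranks_by_svd(weights, base_r=64):
+     # weights: {module_name: 2-D pretrained weight tensor}
+     energy = {}
+     for name, w in weights.items():
+         s = torch.linalg.svdvals(w.float())  # singular values, descending
+         # fraction of the spectral mass captured by the top `base_r` singular values
+         energy[name] = (s[:base_r].sum() / s.sum()).item()
+     mean_e = sum(energy.values()) / len(energy)
+     # flatter spectra (less concentrated mass) get a rank above base_r, and vice versa
+     return {name: max(1, round(base_r * mean_e / e)) for name, e in energy.items()}
+ ```
+
+ In the released adapter, the resulting per-module ranks are recorded in `adapter_config.json` under `r_by_module_dict`.
+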
+ ## Usage:
+ ```python
+ import sys
+ sys.path.insert(1, '/workspace/asr/peft/src')
+ # TODO set this path to the lazy-lora source code path,
+ # or you can install it from source code:
+ # TODO, please install lazylora for usage:
+ # git clone git@github.com:Xianchao-Wu/peft.git
+ # cd peft
+ # python setup.py install
+
+ from transformers import (AutoTokenizer,
+     AutoModelForCausalLM, BitsAndBytesConfig)
+ from peft import PeftModel, PeftConfig
+ import os
+ import torch
+
+ #import ipdb; ipdb.set_trace()
+ cache_dir = "/workspace/asr/peft/qlora"
+ # TODO set this cache_dir to the path where you
+ # stored (or want to store) the llama1-33b model
+
+ lazylora_dir = os.getcwd()
+ # the path that contains 'adapter_config.json'
+ # and 'adapter_model.bin'
+
+ config = PeftConfig.from_pretrained(lazylora_dir)
+
+ tokenizer = AutoTokenizer.from_pretrained(
+     config.base_model_name_or_path,
+     cache_dir=cache_dir,
+     use_auth_token=True
+ )
+
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type='nf4',
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     config.base_model_name_or_path,
+     quantization_config=bnb_config,
+     device_map="auto",
+     cache_dir=cache_dir,
+     use_auth_token=True
+ )
+ #model.print_trainable_parameters()
+ print(sum(p.numel() for p in model.parameters()))
+ # 16,477,866,496 -> about half the 33B parameter count, due to 4-bit loading
+
+ model = PeftModel.from_pretrained(model, lazylora_dir)
+ print('after adding lazy lora parameters:')
+ model.print_trainable_parameters()
+ # trainable params: 0 || all params: 16,965,645,824 || trainable%: 0.0
+ ```
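+
+ After the adapter is loaded, the model can be used for generation like any other causal LM. The snippet below is a minimal, illustrative example only: the "### Human / ### Assistant" prompt format is an assumption based on the Guanaco-style oasst1 fine-tuning, and the sampling settings are arbitrary.
+
+ ```python
+ prompt = "### Human: What is lazy LoRA?### Assistant:"
+ # with device_map="auto", inputs placed on the model's (first) device usually work
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+ with torch.no_grad():
+     output_ids = model.generate(
+         **inputs,
+         max_new_tokens=128,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+     )
+ print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
+ ```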
+
+ ## MMLU result:
+
+ ```json
+ {"mmlu_loss": 2.6712945443520275,
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
+ "mmlu_eval_accuracy_philosophy": 0.7647058823529411,
+ "mmlu_eval_accuracy_virology": 0.3888888888888889,
+ "mmlu_eval_accuracy_high_school_european_history": 0.8333333333333334,
+ "mmlu_eval_accuracy_astronomy": 0.6875,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.34146341463414637,
+ "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
+ "mmlu_eval_accuracy_computer_security": 0.8181818181818182,
+ "mmlu_eval_accuracy_anatomy": 0.5,
+ "mmlu_eval_accuracy_high_school_physics": 0.23529411764705882,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.7619047619047619,
+ "mmlu_eval_accuracy_global_facts": 0.4,
+ "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
+ "mmlu_eval_accuracy_security_studies": 0.7037037037037037,
+ "mmlu_eval_accuracy_world_religions": 0.8421052631578947,
+ "mmlu_eval_accuracy_professional_medicine": 0.7096774193548387,
+ "mmlu_eval_accuracy_management": 0.9090909090909091,
+ "mmlu_eval_accuracy_marketing": 0.8,
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+ "mmlu_eval_accuracy_professional_law": 0.4294117647058823,
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
+ "mmlu_eval_accuracy_high_school_psychology": 0.8333333333333334,
+ "mmlu_eval_accuracy_moral_disputes": 0.5789473684210527,
+ "mmlu_eval_accuracy_professional_accounting": 0.45161290322580644,
+ "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
+ "mmlu_eval_accuracy_nutrition": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
+ "mmlu_eval_accuracy_prehistory": 0.5714285714285714,
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+ "mmlu_eval_accuracy_jurisprudence": 0.5454545454545454,
+ "mmlu_eval_accuracy_moral_scenarios": 0.4,
+ "mmlu_eval_accuracy_sociology": 0.8181818181818182,
+ "mmlu_eval_accuracy_college_biology": 0.5,
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
+ "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.6666666666666666,
+ "mmlu_eval_accuracy_electrical_engineering": 0.3125,
+ "mmlu_eval_accuracy_medical_genetics": 0.8181818181818182,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.5813953488372093,
+ "mmlu_eval_accuracy_college_medicine": 0.5,
+ "mmlu_eval_accuracy_high_school_world_history": 0.6923076923076923,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.3448275862068966,
+ "mmlu_eval_accuracy_international_law": 0.9230769230769231,
+ "mmlu_eval_accuracy_miscellaneous": 0.7558139534883721,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_professional_psychology": 0.5942028985507246,
+ "mmlu_eval_accuracy_econometrics": 0.4166666666666667,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.5384615384615384,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.9090909090909091,
+ "mmlu_eval_accuracy_machine_learning": 0.45454545454545453,
+ "mmlu_eval_accuracy_high_school_biology": 0.53125,
+ "mmlu_eval_accuracy_formal_logic": 0.14285714285714285,
+ "mmlu_eval_accuracy_high_school_us_history": 0.8636363636363636,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy": 0.5696901987706997,
+ "epoch": 3.05}
+ ```
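+
+ As a quick sanity check (an assumption, not part of the repository: the overall `mmlu_eval_accuracy` is treated here as the unweighted mean of the per-subject accuracies, and `mmlu_result.json` is a hypothetical local copy of the block above):
+
+ ```python
+ import json
+
+ with open("mmlu_result.json") as f:  # hypothetical file holding the JSON above
+     results = json.load(f)
+
+ subject_scores = [v for k, v in results.items()
+                   if k.startswith("mmlu_eval_accuracy_")]
+ print(sum(subject_scores) / len(subject_scores))  # should land near the reported 0.5697
+ ```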
+
+ ## License and intended use
+
+ This lazy-lora adapter is based on [Meta's LLaMA1-33b, huggyllama/llama-30b](https://huggingface.co/huggyllama/llama-30b) and trained on the [oasst1 dataset](https://huggingface.co/datasets/OpenAssistant/oasst1), following [Guanaco](https://huggingface.co/timdettmers/guanaco-65b).
+
+ The lazy lora adapter weights are available under the LLaMA-2 license. Note that use of the lazy lora adapter weights requires access to the LLaMA model weights. Lazy lora is based on LLaMA and therefore should be used according to the LLaMA license.
+
+
+ ## Risks and Biases
+
+ The model can produce factually incorrect output and should not be relied on to produce factually accurate information. The model was trained on various public datasets; it is possible that it could generate lewd, biased, or otherwise offensive outputs.
+
adapter_config.json ADDED
@@ -0,0 +1,455 @@
+ {
+ "base_model_name_or_path": "huggyllama/llama-30b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lazy_lora_weights": true,
+ "is_r_by_svd": true,
+ "is_r_reuse": true,
+ "lazy_lora_alpha": 16.0,
+ "lazy_lora_dropout": 0.05,
+ "lazy_pre_adapter_type": "none",
+ "lazy_pre_lora_alpha": 0.1,
+ "modules_to_save": null,
+ "num_attention_heads": 52,
+ "num_layers": 60,
+ "num_transformer_submodules": 1,
+ "num_virtual_tokens": null,
+ "peft_type": "LAZY_LORA",
+ "prefix_tuning_config": null,
+ "prompt_tuning_config": null,
+ "r": 64,
+ "r_by_module_dict": {
+ "model.layers.0.mlp.down_proj": 55,
+ "model.layers.0.mlp.gate_proj": 28,
+ "model.layers.0.mlp.up_proj": 29,
+ "model.layers.0.self_attn.k_proj": 36,
+ "model.layers.0.self_attn.o_proj": 29,
+ "model.layers.0.self_attn.q_proj": 39,
+ "model.layers.0.self_attn.v_proj": 28,
+ "model.layers.1.mlp.down_proj": 61,
+ "model.layers.1.mlp.gate_proj": 49,
+ "model.layers.1.mlp.up_proj": 50,
+ "model.layers.1.self_attn.k_proj": 60,
+ "model.layers.1.self_attn.o_proj": 36,
+ "model.layers.1.self_attn.q_proj": 59,
+ "model.layers.1.self_attn.v_proj": 33,
+ "model.layers.10.mlp.down_proj": 63,
+ "model.layers.10.mlp.gate_proj": 64,
+ "model.layers.10.mlp.up_proj": 64,
+ "model.layers.10.self_attn.k_proj": 79,
+ "model.layers.10.self_attn.o_proj": 58,
+ "model.layers.10.self_attn.q_proj": 78,
+ "model.layers.10.self_attn.v_proj": 58,
+ "model.layers.11.mlp.down_proj": 63,
+ "model.layers.11.mlp.gate_proj": 64,
+ "model.layers.11.mlp.up_proj": 65,
+ "model.layers.11.self_attn.k_proj": 77,
+ "model.layers.11.self_attn.o_proj": 59,
+ "model.layers.11.self_attn.q_proj": 75,
+ "model.layers.11.self_attn.v_proj": 59,
+ "model.layers.12.mlp.down_proj": 62,
+ "model.layers.12.mlp.gate_proj": 64,
+ "model.layers.12.mlp.up_proj": 65,
+ "model.layers.12.self_attn.k_proj": 77,
+ "model.layers.12.self_attn.o_proj": 59,
+ "model.layers.12.self_attn.q_proj": 76,
+ "model.layers.12.self_attn.v_proj": 59,
+ "model.layers.13.mlp.down_proj": 63,
+ "model.layers.13.mlp.gate_proj": 63,
+ "model.layers.13.mlp.up_proj": 65,
+ "model.layers.13.self_attn.k_proj": 76,
+ "model.layers.13.self_attn.o_proj": 58,
+ "model.layers.13.self_attn.q_proj": 75,
+ "model.layers.13.self_attn.v_proj": 58,
+ "model.layers.14.mlp.down_proj": 63,
+ "model.layers.14.mlp.gate_proj": 63,
+ "model.layers.14.mlp.up_proj": 65,
+ "model.layers.14.self_attn.k_proj": 76,
+ "model.layers.14.self_attn.o_proj": 58,
+ "model.layers.14.self_attn.q_proj": 74,
+ "model.layers.14.self_attn.v_proj": 59,
+ "model.layers.15.mlp.down_proj": 63,
+ "model.layers.15.mlp.gate_proj": 63,
+ "model.layers.15.mlp.up_proj": 65,
+ "model.layers.15.self_attn.k_proj": 77,
+ "model.layers.15.self_attn.o_proj": 61,
+ "model.layers.15.self_attn.q_proj": 76,
+ "model.layers.15.self_attn.v_proj": 61,
+ "model.layers.16.mlp.down_proj": 63,
+ "model.layers.16.mlp.gate_proj": 63,
+ "model.layers.16.mlp.up_proj": 65,
+ "model.layers.16.self_attn.k_proj": 77,
+ "model.layers.16.self_attn.o_proj": 61,
+ "model.layers.16.self_attn.q_proj": 76,
+ "model.layers.16.self_attn.v_proj": 62,
+ "model.layers.17.mlp.down_proj": 63,
+ "model.layers.17.mlp.gate_proj": 63,
+ "model.layers.17.mlp.up_proj": 66,
+ "model.layers.17.self_attn.k_proj": 75,
+ "model.layers.17.self_attn.o_proj": 61,
+ "model.layers.17.self_attn.q_proj": 74,
+ "model.layers.17.self_attn.v_proj": 62,
+ "model.layers.18.mlp.down_proj": 64,
+ "model.layers.18.mlp.gate_proj": 63,
+ "model.layers.18.mlp.up_proj": 66,
+ "model.layers.18.self_attn.k_proj": 76,
+ "model.layers.18.self_attn.o_proj": 61,
+ "model.layers.18.self_attn.q_proj": 74,
+ "model.layers.18.self_attn.v_proj": 62,
+ "model.layers.19.mlp.down_proj": 64,
+ "model.layers.19.mlp.gate_proj": 63,
+ "model.layers.19.mlp.up_proj": 66,
+ "model.layers.19.self_attn.k_proj": 74,
+ "model.layers.19.self_attn.o_proj": 63,
+ "model.layers.19.self_attn.q_proj": 73,
+ "model.layers.19.self_attn.v_proj": 64,
+ "model.layers.2.mlp.down_proj": 62,
+ "model.layers.2.mlp.gate_proj": 58,
+ "model.layers.2.mlp.up_proj": 58,
+ "model.layers.2.self_attn.k_proj": 75,
+ "model.layers.2.self_attn.o_proj": 48,
+ "model.layers.2.self_attn.q_proj": 75,
+ "model.layers.2.self_attn.v_proj": 44,
+ "model.layers.20.mlp.down_proj": 64,
+ "model.layers.20.mlp.gate_proj": 63,
+ "model.layers.20.mlp.up_proj": 66,
+ "model.layers.20.self_attn.k_proj": 70,
+ "model.layers.20.self_attn.o_proj": 63,
+ "model.layers.20.self_attn.q_proj": 70,
+ "model.layers.20.self_attn.v_proj": 64,
+ "model.layers.21.mlp.down_proj": 64,
+ "model.layers.21.mlp.gate_proj": 63,
+ "model.layers.21.mlp.up_proj": 66,
+ "model.layers.21.self_attn.k_proj": 72,
+ "model.layers.21.self_attn.o_proj": 64,
+ "model.layers.21.self_attn.q_proj": 71,
+ "model.layers.21.self_attn.v_proj": 64,
+ "model.layers.22.mlp.down_proj": 65,
+ "model.layers.22.mlp.gate_proj": 63,
+ "model.layers.22.mlp.up_proj": 65,
+ "model.layers.22.self_attn.k_proj": 67,
+ "model.layers.22.self_attn.o_proj": 66,
+ "model.layers.22.self_attn.q_proj": 66,
+ "model.layers.22.self_attn.v_proj": 66,
+ "model.layers.23.mlp.down_proj": 65,
+ "model.layers.23.mlp.gate_proj": 64,
+ "model.layers.23.mlp.up_proj": 66,
+ "model.layers.23.self_attn.k_proj": 68,
+ "model.layers.23.self_attn.o_proj": 65,
+ "model.layers.23.self_attn.q_proj": 67,
+ "model.layers.23.self_attn.v_proj": 65,
+ "model.layers.24.mlp.down_proj": 65,
+ "model.layers.24.mlp.gate_proj": 64,
+ "model.layers.24.mlp.up_proj": 65,
+ "model.layers.24.self_attn.k_proj": 69,
+ "model.layers.24.self_attn.o_proj": 66,
+ "model.layers.24.self_attn.q_proj": 69,
+ "model.layers.24.self_attn.v_proj": 67,
+ "model.layers.25.mlp.down_proj": 65,
+ "model.layers.25.mlp.gate_proj": 64,
+ "model.layers.25.mlp.up_proj": 65,
+ "model.layers.25.self_attn.k_proj": 72,
+ "model.layers.25.self_attn.o_proj": 66,
+ "model.layers.25.self_attn.q_proj": 71,
+ "model.layers.25.self_attn.v_proj": 66,
+ "model.layers.26.mlp.down_proj": 65,
+ "model.layers.26.mlp.gate_proj": 64,
+ "model.layers.26.mlp.up_proj": 65,
+ "model.layers.26.self_attn.k_proj": 67,
+ "model.layers.26.self_attn.o_proj": 68,
+ "model.layers.26.self_attn.q_proj": 67,
+ "model.layers.26.self_attn.v_proj": 67,
+ "model.layers.27.mlp.down_proj": 65,
+ "model.layers.27.mlp.gate_proj": 64,
+ "model.layers.27.mlp.up_proj": 64,
+ "model.layers.27.self_attn.k_proj": 62,
+ "model.layers.27.self_attn.o_proj": 67,
+ "model.layers.27.self_attn.q_proj": 62,
+ "model.layers.27.self_attn.v_proj": 67,
+ "model.layers.28.mlp.down_proj": 65,
+ "model.layers.28.mlp.gate_proj": 65,
+ "model.layers.28.mlp.up_proj": 65,
+ "model.layers.28.self_attn.k_proj": 64,
+ "model.layers.28.self_attn.o_proj": 69,
+ "model.layers.28.self_attn.q_proj": 64,
+ "model.layers.28.self_attn.v_proj": 69,
+ "model.layers.29.mlp.down_proj": 65,
+ "model.layers.29.mlp.gate_proj": 65,
+ "model.layers.29.mlp.up_proj": 64,
+ "model.layers.29.self_attn.k_proj": 60,
+ "model.layers.29.self_attn.o_proj": 69,
+ "model.layers.29.self_attn.q_proj": 61,
+ "model.layers.29.self_attn.v_proj": 69,
+ "model.layers.3.mlp.down_proj": 62,
+ "model.layers.3.mlp.gate_proj": 61,
+ "model.layers.3.mlp.up_proj": 60,
+ "model.layers.3.self_attn.k_proj": 80,
+ "model.layers.3.self_attn.o_proj": 48,
+ "model.layers.3.self_attn.q_proj": 78,
+ "model.layers.3.self_attn.v_proj": 46,
+ "model.layers.30.mlp.down_proj": 65,
+ "model.layers.30.mlp.gate_proj": 65,
+ "model.layers.30.mlp.up_proj": 64,
+ "model.layers.30.self_attn.k_proj": 62,
+ "model.layers.30.self_attn.o_proj": 69,
+ "model.layers.30.self_attn.q_proj": 62,
+ "model.layers.30.self_attn.v_proj": 69,
+ "model.layers.31.mlp.down_proj": 65,
+ "model.layers.31.mlp.gate_proj": 65,
+ "model.layers.31.mlp.up_proj": 64,
+ "model.layers.31.self_attn.k_proj": 60,
+ "model.layers.31.self_attn.o_proj": 69,
+ "model.layers.31.self_attn.q_proj": 61,
+ "model.layers.31.self_attn.v_proj": 68,
+ "model.layers.32.mlp.down_proj": 65,
+ "model.layers.32.mlp.gate_proj": 66,
+ "model.layers.32.mlp.up_proj": 64,
+ "model.layers.32.self_attn.k_proj": 59,
+ "model.layers.32.self_attn.o_proj": 69,
+ "model.layers.32.self_attn.q_proj": 60,
+ "model.layers.32.self_attn.v_proj": 69,
+ "model.layers.33.mlp.down_proj": 65,
+ "model.layers.33.mlp.gate_proj": 66,
+ "model.layers.33.mlp.up_proj": 64,
+ "model.layers.33.self_attn.k_proj": 65,
+ "model.layers.33.self_attn.o_proj": 70,
+ "model.layers.33.self_attn.q_proj": 65,
+ "model.layers.33.self_attn.v_proj": 69,
+ "model.layers.34.mlp.down_proj": 65,
+ "model.layers.34.mlp.gate_proj": 66,
+ "model.layers.34.mlp.up_proj": 64,
+ "model.layers.34.self_attn.k_proj": 63,
+ "model.layers.34.self_attn.o_proj": 69,
+ "model.layers.34.self_attn.q_proj": 63,
+ "model.layers.34.self_attn.v_proj": 69,
+ "model.layers.35.mlp.down_proj": 65,
+ "model.layers.35.mlp.gate_proj": 66,
+ "model.layers.35.mlp.up_proj": 64,
+ "model.layers.35.self_attn.k_proj": 61,
+ "model.layers.35.self_attn.o_proj": 69,
+ "model.layers.35.self_attn.q_proj": 61,
+ "model.layers.35.self_attn.v_proj": 69,
+ "model.layers.36.mlp.down_proj": 65,
+ "model.layers.36.mlp.gate_proj": 66,
+ "model.layers.36.mlp.up_proj": 64,
+ "model.layers.36.self_attn.k_proj": 63,
+ "model.layers.36.self_attn.o_proj": 70,
+ "model.layers.36.self_attn.q_proj": 63,
+ "model.layers.36.self_attn.v_proj": 70,
+ "model.layers.37.mlp.down_proj": 65,
+ "model.layers.37.mlp.gate_proj": 66,
+ "model.layers.37.mlp.up_proj": 64,
+ "model.layers.37.self_attn.k_proj": 59,
+ "model.layers.37.self_attn.o_proj": 70,
+ "model.layers.37.self_attn.q_proj": 60,
+ "model.layers.37.self_attn.v_proj": 70,
+ "model.layers.38.mlp.down_proj": 65,
+ "model.layers.38.mlp.gate_proj": 66,
+ "model.layers.38.mlp.up_proj": 64,
+ "model.layers.38.self_attn.k_proj": 57,
+ "model.layers.38.self_attn.o_proj": 71,
+ "model.layers.38.self_attn.q_proj": 58,
+ "model.layers.38.self_attn.v_proj": 71,
+ "model.layers.39.mlp.down_proj": 65,
+ "model.layers.39.mlp.gate_proj": 66,
+ "model.layers.39.mlp.up_proj": 64,
+ "model.layers.39.self_attn.k_proj": 57,
+ "model.layers.39.self_attn.o_proj": 70,
+ "model.layers.39.self_attn.q_proj": 58,
+ "model.layers.39.self_attn.v_proj": 70,
+ "model.layers.4.mlp.down_proj": 62,
+ "model.layers.4.mlp.gate_proj": 63,
+ "model.layers.4.mlp.up_proj": 62,
+ "model.layers.4.self_attn.k_proj": 77,
+ "model.layers.4.self_attn.o_proj": 53,
+ "model.layers.4.self_attn.q_proj": 76,
+ "model.layers.4.self_attn.v_proj": 51,
+ "model.layers.40.mlp.down_proj": 65,
+ "model.layers.40.mlp.gate_proj": 67,
+ "model.layers.40.mlp.up_proj": 65,
+ "model.layers.40.self_attn.k_proj": 57,
+ "model.layers.40.self_attn.o_proj": 72,
+ "model.layers.40.self_attn.q_proj": 57,
+ "model.layers.40.self_attn.v_proj": 71,
+ "model.layers.41.mlp.down_proj": 65,
+ "model.layers.41.mlp.gate_proj": 66,
+ "model.layers.41.mlp.up_proj": 65,
+ "model.layers.41.self_attn.k_proj": 54,
+ "model.layers.41.self_attn.o_proj": 71,
+ "model.layers.41.self_attn.q_proj": 55,
+ "model.layers.41.self_attn.v_proj": 71,
+ "model.layers.42.mlp.down_proj": 65,
+ "model.layers.42.mlp.gate_proj": 66,
+ "model.layers.42.mlp.up_proj": 65,
+ "model.layers.42.self_attn.k_proj": 52,
+ "model.layers.42.self_attn.o_proj": 71,
+ "model.layers.42.self_attn.q_proj": 53,
+ "model.layers.42.self_attn.v_proj": 71,
+ "model.layers.43.mlp.down_proj": 65,
+ "model.layers.43.mlp.gate_proj": 67,
+ "model.layers.43.mlp.up_proj": 65,
+ "model.layers.43.self_attn.k_proj": 58,
+ "model.layers.43.self_attn.o_proj": 71,
+ "model.layers.43.self_attn.q_proj": 58,
+ "model.layers.43.self_attn.v_proj": 71,
+ "model.layers.44.mlp.down_proj": 65,
+ "model.layers.44.mlp.gate_proj": 67,
+ "model.layers.44.mlp.up_proj": 65,
+ "model.layers.44.self_attn.k_proj": 55,
+ "model.layers.44.self_attn.o_proj": 71,
+ "model.layers.44.self_attn.q_proj": 56,
+ "model.layers.44.self_attn.v_proj": 71,
+ "model.layers.45.mlp.down_proj": 65,
+ "model.layers.45.mlp.gate_proj": 67,
+ "model.layers.45.mlp.up_proj": 65,
+ "model.layers.45.self_attn.k_proj": 55,
+ "model.layers.45.self_attn.o_proj": 71,
+ "model.layers.45.self_attn.q_proj": 56,
+ "model.layers.45.self_attn.v_proj": 71,
+ "model.layers.46.mlp.down_proj": 65,
+ "model.layers.46.mlp.gate_proj": 67,
+ "model.layers.46.mlp.up_proj": 65,
+ "model.layers.46.self_attn.k_proj": 50,
+ "model.layers.46.self_attn.o_proj": 68,
+ "model.layers.46.self_attn.q_proj": 52,
+ "model.layers.46.self_attn.v_proj": 69,
+ "model.layers.47.mlp.down_proj": 65,
+ "model.layers.47.mlp.gate_proj": 67,
+ "model.layers.47.mlp.up_proj": 65,
+ "model.layers.47.self_attn.k_proj": 50,
+ "model.layers.47.self_attn.o_proj": 70,
+ "model.layers.47.self_attn.q_proj": 52,
+ "model.layers.47.self_attn.v_proj": 71,
+ "model.layers.48.mlp.down_proj": 65,
+ "model.layers.48.mlp.gate_proj": 67,
+ "model.layers.48.mlp.up_proj": 65,
+ "model.layers.48.self_attn.k_proj": 53,
+ "model.layers.48.self_attn.o_proj": 71,
+ "model.layers.48.self_attn.q_proj": 55,
+ "model.layers.48.self_attn.v_proj": 71,
+ "model.layers.49.mlp.down_proj": 65,
+ "model.layers.49.mlp.gate_proj": 67,
+ "model.layers.49.mlp.up_proj": 66,
+ "model.layers.49.self_attn.k_proj": 57,
+ "model.layers.49.self_attn.o_proj": 73,
+ "model.layers.49.self_attn.q_proj": 58,
+ "model.layers.49.self_attn.v_proj": 74,
+ "model.layers.5.mlp.down_proj": 62,
+ "model.layers.5.mlp.gate_proj": 64,
+ "model.layers.5.mlp.up_proj": 63,
+ "model.layers.5.self_attn.k_proj": 76,
+ "model.layers.5.self_attn.o_proj": 53,
+ "model.layers.5.self_attn.q_proj": 75,
+ "model.layers.5.self_attn.v_proj": 52,
+ "model.layers.50.mlp.down_proj": 65,
+ "model.layers.50.mlp.gate_proj": 67,
+ "model.layers.50.mlp.up_proj": 66,
+ "model.layers.50.self_attn.k_proj": 56,
+ "model.layers.50.self_attn.o_proj": 72,
+ "model.layers.50.self_attn.q_proj": 57,
+ "model.layers.50.self_attn.v_proj": 72,
+ "model.layers.51.mlp.down_proj": 65,
+ "model.layers.51.mlp.gate_proj": 67,
+ "model.layers.51.mlp.up_proj": 66,
+ "model.layers.51.self_attn.k_proj": 57,
+ "model.layers.51.self_attn.o_proj": 70,
+ "model.layers.51.self_attn.q_proj": 58,
+ "model.layers.51.self_attn.v_proj": 71,
+ "model.layers.52.mlp.down_proj": 65,
+ "model.layers.52.mlp.gate_proj": 66,
+ "model.layers.52.mlp.up_proj": 66,
+ "model.layers.52.self_attn.k_proj": 54,
+ "model.layers.52.self_attn.o_proj": 70,
+ "model.layers.52.self_attn.q_proj": 55,
+ "model.layers.52.self_attn.v_proj": 70,
+ "model.layers.53.mlp.down_proj": 65,
+ "model.layers.53.mlp.gate_proj": 66,
+ "model.layers.53.mlp.up_proj": 66,
+ "model.layers.53.self_attn.k_proj": 54,
+ "model.layers.53.self_attn.o_proj": 68,
+ "model.layers.53.self_attn.q_proj": 56,
+ "model.layers.53.self_attn.v_proj": 69,
+ "model.layers.54.mlp.down_proj": 66,
+ "model.layers.54.mlp.gate_proj": 66,
+ "model.layers.54.mlp.up_proj": 67,
+ "model.layers.54.self_attn.k_proj": 55,
+ "model.layers.54.self_attn.o_proj": 70,
+ "model.layers.54.self_attn.q_proj": 56,
+ "model.layers.54.self_attn.v_proj": 71,
+ "model.layers.55.mlp.down_proj": 66,
+ "model.layers.55.mlp.gate_proj": 66,
+ "model.layers.55.mlp.up_proj": 67,
+ "model.layers.55.self_attn.k_proj": 56,
+ "model.layers.55.self_attn.o_proj": 70,
+ "model.layers.55.self_attn.q_proj": 57,
+ "model.layers.55.self_attn.v_proj": 70,
+ "model.layers.56.mlp.down_proj": 65,
+ "model.layers.56.mlp.gate_proj": 66,
+ "model.layers.56.mlp.up_proj": 67,
+ "model.layers.56.self_attn.k_proj": 53,
+ "model.layers.56.self_attn.o_proj": 73,
+ "model.layers.56.self_attn.q_proj": 54,
+ "model.layers.56.self_attn.v_proj": 74,
+ "model.layers.57.mlp.down_proj": 66,
+ "model.layers.57.mlp.gate_proj": 66,
+ "model.layers.57.mlp.up_proj": 67,
+ "model.layers.57.self_attn.k_proj": 54,
+ "model.layers.57.self_attn.o_proj": 68,
+ "model.layers.57.self_attn.q_proj": 55,
+ "model.layers.57.self_attn.v_proj": 69,
+ "model.layers.58.mlp.down_proj": 65,
+ "model.layers.58.mlp.gate_proj": 67,
+ "model.layers.58.mlp.up_proj": 67,
+ "model.layers.58.self_attn.k_proj": 49,
+ "model.layers.58.self_attn.o_proj": 63,
+ "model.layers.58.self_attn.q_proj": 50,
+ "model.layers.58.self_attn.v_proj": 65,
+ "model.layers.59.mlp.down_proj": 65,
+ "model.layers.59.mlp.gate_proj": 68,
+ "model.layers.59.mlp.up_proj": 68,
+ "model.layers.59.self_attn.k_proj": 53,
+ "model.layers.59.self_attn.o_proj": 57,
+ "model.layers.59.self_attn.q_proj": 53,
+ "model.layers.59.self_attn.v_proj": 60,
+ "model.layers.6.mlp.down_proj": 62,
+ "model.layers.6.mlp.gate_proj": 64,
+ "model.layers.6.mlp.up_proj": 63,
+ "model.layers.6.self_attn.k_proj": 76,
+ "model.layers.6.self_attn.o_proj": 54,
+ "model.layers.6.self_attn.q_proj": 75,
+ "model.layers.6.self_attn.v_proj": 53,
+ "model.layers.7.mlp.down_proj": 62,
+ "model.layers.7.mlp.gate_proj": 64,
+ "model.layers.7.mlp.up_proj": 64,
+ "model.layers.7.self_attn.k_proj": 78,
+ "model.layers.7.self_attn.o_proj": 56,
+ "model.layers.7.self_attn.q_proj": 77,
+ "model.layers.7.self_attn.v_proj": 55,
+ "model.layers.8.mlp.down_proj": 62,
+ "model.layers.8.mlp.gate_proj": 64,
+ "model.layers.8.mlp.up_proj": 64,
+ "model.layers.8.self_attn.k_proj": 80,
+ "model.layers.8.self_attn.o_proj": 58,
+ "model.layers.8.self_attn.q_proj": 78,
+ "model.layers.8.self_attn.v_proj": 58,
+ "model.layers.9.mlp.down_proj": 62,
+ "model.layers.9.mlp.gate_proj": 64,
+ "model.layers.9.mlp.up_proj": 64,
+ "model.layers.9.self_attn.k_proj": 80,
+ "model.layers.9.self_attn.o_proj": 58,
+ "model.layers.9.self_attn.q_proj": 78,
+ "model.layers.9.self_attn.v_proj": 58
+ },
+ "target_modules": [
+ "down_proj",
+ "gate_proj",
+ "k_proj",
+ "o_proj",
+ "v_proj",
+ "q_proj",
+ "up_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "token_dim": 6656
+ }
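
Since `is_r_by_svd` is true, the per-module ranks in `r_by_module_dict` above vary around the base rank `r` of 64. The following is a minimal, illustrative sketch (assuming the file above is saved locally as `adapter_config.json`) for summarizing the rank distribution per projection type:

```python
import json
from collections import defaultdict

with open("adapter_config.json") as f:  # local copy of the config shown above
    cfg = json.load(f)

ranks_by_type = defaultdict(list)
for module_name, rank in cfg["r_by_module_dict"].items():
    # e.g. "model.layers.10.self_attn.k_proj" -> "k_proj"
    ranks_by_type[module_name.split(".")[-1]].append(rank)

for proj, ranks in sorted(ranks_by_type.items()):
    print(f"{proj}: min={min(ranks)}, mean={sum(ranks) / len(ranks):.1f}, max={max(ranks)}")
```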
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c4df3d633f53083955784dfd9a84108eb5bdb14769a991e6428b2962184008f
+ size 975880621
usage.py ADDED
@@ -0,0 +1,51 @@
+ import sys
+ sys.path.insert(1, '/workspace/asr/peft/src')
+ # TODO set this path to the lazy-lora source code path, or you can install it from source code:
+ # TODO, please install lazylora for usage:
+ # git clone git@github.com:Xianchao-Wu/peft.git
+ # cd peft
+ # python setup.py install
+
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ from peft import PeftModel, PeftConfig
+ import os
+ import torch
+
+ #import ipdb; ipdb.set_trace()
+ cache_dir = "/workspace/asr/peft/qlora"
+ # TODO set this cache_dir to the path where you stored (or want to store) the llama1-33b (huggyllama/llama-30b) model
+
+ lazylora_dir = os.getcwd()  # the path that contains 'adapter_config.json' and 'adapter_model.bin'
+
+ config = PeftConfig.from_pretrained(lazylora_dir)
+
+ tokenizer = AutoTokenizer.from_pretrained(
+     config.base_model_name_or_path,
+     cache_dir=cache_dir,
+     use_auth_token=True
+ )
+
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type='nf4',
+     bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ model = AutoModelForCausalLM.from_pretrained(
+     config.base_model_name_or_path,
+     quantization_config=bnb_config,
+     device_map="auto",
+     cache_dir=cache_dir,
+     use_auth_token=True
+ )
+ #model.print_trainable_parameters()
+ print(sum(p.numel() for p in model.parameters()))
+ # 16,477,866,496 -> about half the 33B parameter count, due to 4-bit loading
+
+ model = PeftModel.from_pretrained(model, lazylora_dir)
+ print('after adding lazy lora parameters:')
+ model.print_trainable_parameters()
+ # trainable params: 0 || all params: 16,965,645,824 || trainable%: 0.0
+
+