Update README.md
Browse files
README.md
CHANGED
@@ -2,4 +2,24 @@
|
|
2 |
license: apache-2.0
|
3 |
language:
|
4 |
- zh
|
5 |
-
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
license: apache-2.0
language:
- zh
---

An instruction-tuned LoRA model of https://huggingface.co/baichuan-inc/baichuan-7B

Training framework: https://github.com/hiyouga/LLaMA-Factory

Please follow the baichuan-7B License to use this model.
|
10 |
+
Usage:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("hiyouga/baichuan-7b-sft", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("hiyouga/baichuan-7b-sft", trust_remote_code=True).cuda()
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

query = "晚上睡不着怎么办"
template = (
    "你是一名经验丰富的心理咨询师,专长于认知行为疗法, 以心理咨询师的身份回答以下问题。\n"
    "Human: {}\nAssistant: "
)

inputs = tokenizer([template.format(query)], return_tensors="pt")
inputs = inputs.to("cuda")
generate_ids = model.generate(**inputs, max_new_tokens=256, streamer=streamer)
```