Update README.md
Browse files
README.md
CHANGED
@@ -3,3 +3,33 @@ license: other
|
|
3 |
license_name: internlm-license
|
4 |
license_link: https://huggingface.co/internlm/internlm-chat-7b-v1_1
|
5 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
license_name: internlm-license
|
4 |
license_link: https://huggingface.co/internlm/internlm-chat-7b-v1_1
|
5 |
---
|
6 |
+
|
7 |
+
internlm-chat-7b-v1_1をGPTQ変換したモデルです<br>
|
8 |
+
利用に当たっては https://huggingface.co/internlm/internlm-chat-7b-v1_1 のライセンスに従って下さい<br>
|
9 |
+
<br>
|
10 |
+
推論用コード<br>
|
11 |
+
```python
|
12 |
+
# Interactive inference demo for the GPTQ-quantized internlm-chat-7b-v1_1 model.
# Loads the quantized weights, then runs a simple REPL-style chat loop and
# prints the wall-clock time of each model response.
import time

import torch  # NOTE(review): not referenced directly here; presumably required by the model runtime — confirm before removing
from transformers import AutoTokenizer, AutoModelForCausalLM, GPTQConfig

# Local directory holding the GPTQ-converted model (Windows-style path).
model_path = r".\internlm-chat-7b-v1_1-gptq"

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# 4-bit GPTQ; disable_exllama=True selects the non-exllama kernels
# (needed when the exllama backend is unavailable in the environment).
gptq_config = GPTQConfig(bits=4, disable_exllama=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",
    quantization_config=gptq_config,
    trust_remote_code=True,
)
model = model.eval()

# Conversation history threaded through model.chat() across turns.
history = []

while True:
    # Exit cleanly on Ctrl+C or EOF instead of crashing with a traceback.
    try:
        txt = input("msg:")
    except (EOFError, KeyboardInterrupt):
        break
    start_time = time.perf_counter()
    response, history = model.chat(tokenizer, txt, history=history)
    print(response)
    end_time = time.perf_counter()
    elapsed_time = end_time - start_time
    print(f"worktime:{elapsed_time}")
|
34 |
+
|
35 |
+
```
|