Spaces: Running on Zero
Ventsislav Muchinov committed
Commit de66c65 • 1 Parent(s): 9a47cf8
Upload app.py
app.py CHANGED
@@ -7,7 +7,7 @@ import spaces
 import torch
 
 
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
 
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
@@ -16,14 +16,17 @@ ACCESS_TOKEN = os.getenv("HF_TOKEN", "")
 
 model_id = "Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8"
 
+quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
     torch_dtype=torch.float16,
-    device_map="
+    device_map="cuda",
     trust_remote_code=True,
     low_cpu_mem_usage=True,
-    token=ACCESS_TOKEN)
+    quantize_config=quantization_config
+    token=ACCESS_TOKEN).to("cuda")
+
 tokenizer = AutoTokenizer.from_pretrained(
     model_id,
     trust_remote_code=True,
@@ -44,7 +47,7 @@ def generate(
     conversation.append({"role": "system", "content": system_prompt})
     conversation.append({"role": "user", "content": message})
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to("cuda")
     if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
         input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
         gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
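As committed, the new from_pretrained call cannot run: the line quantize_config=quantization_config is missing a trailing comma (a SyntaxError), and the transformers keyword is quantization_config, not quantize_config. Two further pitfalls: the checkpoint is already GPTQ-quantized, so layering a bitsandbytes load_in_8bit config on top of it conflicts with the quantization baked into the weights, and the trailing .to("cuda") is redundant once device_map="cuda" has placed the model (bitsandbytes 8-bit models reject .to outright). A minimal loading sketch that avoids these pitfalls, as an assumption about the intent rather than the committed code:

# Loading sketch (assumed fix, not the committed code): the GPTQ checkpoint
# carries its own quantization config, so no BitsAndBytesConfig is needed.
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

ACCESS_TOKEN = os.getenv("HF_TOKEN", "")
model_id = "Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8"

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="cuda",        # places the weights on the GPU; no extra .to("cuda")
    trust_remote_code=True,
    low_cpu_mem_usage=True,
    token=ACCESS_TOKEN,
)
tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    trust_remote_code=True,
    token=ACCESS_TOKEN,
)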
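The last hunk moves the tokenized prompt to the GPU before the length check. One hedged note: for instruct models, apply_chat_template is usually called with add_generation_prompt=True so the template ends where the assistant reply should begin; the commit omits it. Given the TextIteratorStreamer import, the rest of generate presumably follows the standard threaded streaming pattern; a sketch under that assumption, reusing the module-level model, tokenizer, and DEFAULT_MAX_NEW_TOKENS (the helper name stream_reply is illustrative, not from the commit):

# Streaming sketch (assumed continuation of the app, not the committed code):
# model.generate runs on a worker thread while the streamer yields decoded text.
from threading import Thread

from transformers import TextIteratorStreamer

def stream_reply(input_ids, max_new_tokens=DEFAULT_MAX_NEW_TOKENS):
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    Thread(
        target=model.generate,
        kwargs=dict(
            input_ids=input_ids,
            streamer=streamer,
            max_new_tokens=max_new_tokens,
        ),
    ).start()

    # Yield the accumulated text so far, the shape a streaming Gradio
    # ChatInterface callback expects.
    chunks = []
    for text in streamer:
        chunks.append(text)
        yield "".join(chunks)

On a ZeroGPU Space, the function that touches the model would also carry the @spaces.GPU decorator, matching the import spaces visible in the first hunk's context.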