Initial GPTQ model commit
handler.py +49 -0
handler.py
ADDED
@@ -0,0 +1,49 @@
+from typing import Any, Dict, List
+
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from peft import PeftConfig, PeftModel
+
+
+class EndpointHandler:
+    def __init__(self, path=""):
+        # load tokenizer and model from path
+        self.tokenizer = AutoTokenizer.from_pretrained(path)
+        try:
+            config = PeftConfig.from_pretrained(path)
+            model = AutoModelForCausalLM.from_pretrained(
+                config.base_model_name_or_path,
+                return_dict=True,
+                load_in_8bit=True,
+                device_map="auto",
+                torch_dtype=torch.float16,
+                trust_remote_code=True,
+            )
+            model.resize_token_embeddings(len(self.tokenizer))
+            model = PeftModel.from_pretrained(model, path)
+        except Exception:
+            model = AutoModelForCausalLM.from_pretrained(
+                path, device_map="auto", load_in_8bit=True, torch_dtype=torch.float16, trust_remote_code=True
+            )
+        self.model = model
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
+        # process input
+        inputs = data.pop("inputs", data)
+        parameters = data.pop("parameters", None)
+
+        # preprocess
+        inputs = self.tokenizer(inputs, return_tensors="pt").to(self.device)
+
+        # pass inputs with all kwargs in data
+        if parameters is not None:
+            outputs = self.model.generate(**inputs, **parameters)
+        else:
+            outputs = self.model.generate(**inputs)
+
+        # postprocess the prediction
+        prediction = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        return [{"generated_text": prediction}]
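
A quick way to sanity-check this handler is to call it directly with a request-shaped payload. The snippet below is a minimal local sketch and is not part of the commit: it assumes the repository files sit in the current directory (path="."), that bitsandbytes and a CUDA GPU are available for the 8-bit load, and that the generation parameters shown are purely illustrative.

# Local smoke test for EndpointHandler (illustrative, not part of this commit).
# Assumes handler.py is importable from the current directory and that the
# weights in "." can be loaded (8-bit loading needs bitsandbytes + CUDA).
from handler import EndpointHandler

handler = EndpointHandler(path=".")

# Payload shape mirrors what an inference endpoint would forward to __call__.
payload = {
    "inputs": "Write a haiku about quantized language models.",
    "parameters": {"max_new_tokens": 64, "do_sample": True, "temperature": 0.7},
}

result = handler(payload)
print(result[0]["generated_text"])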