from typing import Any, Dict

from transformers import AutoTokenizer

from auto_gptq import AutoGPTQForCausalLM


class PreTrainedPipeline:
    def __init__(self, path=""):
        # Load the tokenizer and the GPTQ-quantized model from the given path
        # and keep them on the instance so __call__ can use them.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoGPTQForCausalLM.from_quantized(path, device="cuda:0", use_safetensors=True)

    def __call__(self, data: Any) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`):
                Includes the input text under "inputs" and optional generation
                parameters under "parameters".
        Return:
            A :obj:`dict` containing the generated text under "generated_text".
        """
        inputs = data.get("inputs", data)
        parameters = data.get("parameters", {})

        # Tokenize the prompt and move the input ids onto the model's device.
        input_ids = self.tokenizer(inputs, return_tensors="pt").input_ids.to(self.model.device)

        # Generate token ids with the quantized model, then decode them back to text.
        output_ids = self.model.generate(input_ids, **parameters)

        return {"generated_text": self.tokenizer.decode(output_ids[0].tolist())}
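

# Illustrative local usage sketch (an assumption, not part of the original handler):
# it presumes the current directory holds a GPTQ-quantized model with safetensors
# weights and that a CUDA device is available. The prompt and the generation
# parameters below are placeholders.
if __name__ == "__main__":
    pipe = PreTrainedPipeline(path=".")
    result = pipe({"inputs": "Hello, my name is", "parameters": {"max_new_tokens": 32}})
    print(result["generated_text"])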