# project_charles / chat_service.py
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# from huggingface_hub.inference_api import InferenceApi


class ChatService:
    def __init__(self, api="huggingface", repo_id="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"):
        self._api = api
        # Prefer the GPU when one is available; fall back to CPU otherwise.
        self._device = "cuda:0" if torch.cuda.is_available() else "cpu"
        if self._api == "huggingface":
            self._tokenizer = AutoTokenizer.from_pretrained(repo_id)
            # Half precision roughly halves the memory footprint; fp16
            # inference generally assumes a CUDA device.
            self._model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16)
            # self._model = AutoModelForCausalLM.from_pretrained(repo_id).half()
            self._model.eval().to(self._device)
        else:
            raise ValueError(f"Unknown API: {self._api}")
        self._system_prompt = (
            "Below are a series of dialogues between various people and an AI assistant. "
            "The AI tries to be helpful, polite, honest, sophisticated, emotionally aware, "
            "and humble-but-knowledgeable. The assistant is happy to help with almost "
            "anything, and will do its best to understand exactly what is needed. It also "
            "tries to avoid giving false or misleading information, and it caveats when it "
            "isn't entirely sure about the right answer. That said, the assistant is "
            "practical and really does its best, and doesn't let caution get too much in "
            "the way of being useful.\n"
            "-----\n"
        )
        self._user_name = "<|prompter|>"
        self._agent_name = "<|assistant|>"
        self.reset()

    def reset(self):
        # Clear the per-turn histories and re-seed the running transcript
        # with the system prompt (the original assigned the empty
        # _user_history list here, which would put a list where a string
        # accumulator is expected).
        self._user_history = []
        self._agent_history = []
        self._full_history = self._system_prompt if self._system_prompt else ""

    def _chat(self, prompt):
        if self._api == "huggingface":
            tokens = self._tokenizer.encode(prompt, return_tensors="pt")
            tokens = tokens.to(self._device)
            outputs = self._model.generate(
                tokens,
                early_stopping=True,
                max_new_tokens=200,
                do_sample=True,
                top_k=40,
                temperature=1.0,
                pad_token_id=self._tokenizer.eos_token_id,
            )
            # Decode only the newly generated tokens: outputs[0] also
            # contains the prompt, which would otherwise be echoed back and
            # re-appended to the history by chat(). (The original passed
            # truncate_before_pattern, a CodeGen-tokenizer-specific kwarg
            # that this tokenizer ignores.)
            new_tokens = outputs[0][tokens.shape[1]:]
            agent_response = self._tokenizer.decode(new_tokens, skip_special_tokens=True)
        else:
            raise NotImplementedError(f"API not implemented: {self._api}")
        return agent_response

    def chat(self, prompt):
        # Append the user turn to the running transcript...
        if self._user_name:
            self._full_history += f"{self._user_name}: {prompt}\n"
        else:
            self._full_history += f"{prompt}\n"
        self._user_history.append(prompt)
        # ...generate a reply conditioned on the whole conversation so far...
        agent_response = self._chat(self._full_history)
        # ...and record the agent turn so the next call sees both sides.
        if self._agent_name:
            self._full_history += f"{self._agent_name}: {agent_response}\n"
        else:
            self._full_history += f"{agent_response}\n"
        self._agent_history.append(agent_response)
        return agent_response
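

# A minimal smoke test, added here as a sketch rather than part of the
# original service: running this file directly loads the model and
# exchanges two turns. The prompts are illustrative, and loading the 12B
# checkpoint in fp16 needs a GPU with roughly 24 GB of memory.
if __name__ == "__main__":
    chat_service = ChatService()
    print(chat_service.chat("Hello! Who are you?"))
    print(chat_service.chat("Can you summarise what you just said in one sentence?"))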