|
from huggingface_hub import InferenceClient |
|
from typing import List, Dict |
|
from config import HF_TOKEN, MODEL_NAME, SYSTEM_PROMPT |
|
|
|
class Chatbot: |
|
def __init__(self): |
|
self.client = InferenceClient(api_key=HF_TOKEN) |
|
self.conversation_history = [ |
|
{"role": "system", "content": SYSTEM_PROMPT} |
|
] |
|
|
|
def generate_context(self, relevant_chunks: List[Dict]) -> str: |
|
context = "Kontekst z przepisów prawnych:\n\n" |
|
for chunk in relevant_chunks: |
|
context += f"{chunk['text']}\n\n" |
|
return context |
|
|
|
def get_response(self, user_input: str, context: str) -> str: |
|
messages = self.conversation_history + [ |
|
{"role": "user", "content": f"Kontekst: {context}\n\nPytanie: {user_input}"} |
|
] |
|
|
|
response = "" |
|
stream = self.client.chat.completions.create( |
|
model=MODEL_NAME, |
|
messages=messages, |
|
temperature=0.5, |
|
max_tokens=8192, |
|
top_p=0.7, |
|
stream=True |
|
) |
|
|
|
for chunk in stream: |
|
content = chunk.choices[0].delta.content |
|
if content: |
|
response += content |
|
yield content |
|
|
|
self.conversation_history.append({"role": "user", "content": user_input}) |
|
self.conversation_history.append({"role": "assistant", "content": response}) |
|
|
|
def clear_history(self): |
|
self.conversation_history = [ |
|
{"role": "system", "content": SYSTEM_PROMPT} |
|
] |