from pydantic import BaseModel
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.agents import Tool, AgentExecutor, create_json_chat_agent, create_react_agent
from langchain_core.messages import AIMessage, HumanMessage
from personal_models import PersonalModels
from personal_templates import PersonalTemplate
import uuid
#------------------------------- Mixtral
def create_model_instances(model_name, embedding_model, embedding_dir, agent_type):
    """Build the agent executor: chat/RAG models, FAISS retriever tool and the agent itself."""
    templates = PersonalTemplate()
    models = PersonalModels()

    # Chat model used by the JSON chat agent
    chat_model = models.get_chat_model(
        model=model_name,
        temperature=0.1,
        max_tokens=1000)

    # Smaller-budget model used for retrieval-augmented answers (and the ReAct agent)
    llm = models.get_rag_model(
        model=model_name,
        temperature=0.1,
        max_tokens=500,
        top_k=20)

    # Load the FAISS index built with the chosen embedding model
    # (newer langchain-community releases may also require allow_dangerous_deserialization=True)
    embeddings = models.get_embedding_model(model=embedding_model)
    vectorstore = FAISS.load_local(embedding_dir, embeddings)

    # RetrievalQA chain exposed to the agent as a single tool
    qa_tool = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vectorstore.as_retriever(),
        verbose=True,
        chain_type_kwargs={'prompt': templates.retriever_prompt}
    )
    tools = [
        Tool(
            name='tax-law-search',
            func=qa_tool.run,
            description='useful when you want to answer questions about Brazilian tax law and legal entities',
            verbose=True
        )
    ]

    print('Usando agente do tipo: ', agent_type)  # "Using agent of type: ..."
    if agent_type == 'JSON_CHAT_MODEL':
        agent = create_json_chat_agent(chat_model, tools, templates.chatbot_tributario_prompt)
    else:
        agent = create_react_agent(llm, tools, templates.chatbot_tributario_prompt_react)

    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        handle_parsing_errors="Check your output and make sure it conforms! "
                              "Do not output an action and a final answer at the same time.",
        return_intermediate_steps=True,
        verbose=True
    )
    return agent_executor
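
# Usage sketch for the factory above; the model id, embedding model and index path
# below are placeholders for illustration only, not values from this Space:
#     executor = create_model_instances(
#         "mixtral-8x7b", "all-MiniLM-L6-v2", "./faiss_index", "JSON_CHAT_MODEL")
#     executor.invoke({"input": "O que é o Simples Nacional?", "chat_history": []})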
#-------------------------------- Modified class
class Message(BaseModel):
    """Payload describing a single chat request."""
    user_id: str
    text: str
    model: str
    embedding_model: str
    embedding_dir: str
    agent_type: str
class ChatHandler:
    # Class-level dict shared by all handler instances: chat history keyed by user_id
    historico_por_usuario = {}

    def __init__(self):
        pass

    def generate_id(self):
        # Create a new user id with an empty conversation history
        new_id = str(uuid.uuid4())
        self.historico_por_usuario[new_id] = []
        return new_id

    def clear_history(self, user_id):
        if user_id not in self.historico_por_usuario:
            return "Usuário não encontrado"  # "User not found"
        self.historico_por_usuario[user_id].clear()
        return "Histórico limpo com sucesso"  # "History cleared successfully"

    def post_message(self, message: dict):
        user_id = message['user_id']
        if user_id not in self.historico_por_usuario:
            return "Usuário não encontrado"  # "User not found"

        # A fresh agent executor is built for every incoming message
        model_name = message['model']
        agent_executor = create_model_instances(
            model_name, message['embedding_model'], message['embedding_dir'], message['agent_type'])

        try:
            print('message: ', message['text'])
            # Use only the last 10 messages as conversational context
            chat_history = self.historico_por_usuario[user_id][-10:]
            response = agent_executor.invoke(
                {
                    "input": message['text'],
                    "chat_history": chat_history
                }
            )
            print(response['intermediate_steps'])
            self.historico_por_usuario[user_id].append(HumanMessage(content=message['text']))
            self.historico_por_usuario[user_id].append(AIMessage(content=response['output']))
        except Exception as e:
            # "An agent execution error occurred. The error will not be saved to the
            # history, please try again."
            response = {'output': 'Ocorreu um erro de execução do Agente. O erro não será salvo no histórico, tente novamente.',
                        'intermediate_steps': []}
        return response['output'], response['intermediate_steps']
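
#-------------------------------- Example usage (sketch)
# A minimal, hypothetical driver showing how Message and ChatHandler might be wired
# together; the model id, embedding model and index path are placeholders, not
# values taken from this Space.
if __name__ == "__main__":
    handler = ChatHandler()
    user_id = handler.generate_id()
    msg = Message(
        user_id=user_id,
        text="O que é o Simples Nacional?",
        model="mixtral-8x7b",                # placeholder model id
        embedding_model="all-MiniLM-L6-v2",  # placeholder embedding model
        embedding_dir="./faiss_index",       # placeholder path to a saved FAISS index
        agent_type="JSON_CHAT_MODEL",
    )
    output, steps = handler.post_message(msg.dict())  # .model_dump() on Pydantic v2
    print(output)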