|
from fastapi import FastAPI |
|
|
|
from fastapi.middleware.cors import CORSMiddleware |
|
|
|
from chatbot import Chatbot |
|
|
|
from chatbotmemory import ChatbotMemory |
|
|
|
import logging |
|
|
|
from langchain_core.messages import AIMessage, HumanMessage |
|
|
|
|
|
app = FastAPI()

# Process-wide logging config: INFO level; basicConfig() also attaches a
# default stream handler to the root logger.
logging.basicConfig(level=logging.INFO)

logger = logging.getLogger(__name__)

formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")

handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
# BUG FIX: basicConfig() above already gave the root logger a handler, and
# this module logger propagates to root — so every record was emitted twice
# (once by our handler, once by root's). Stop propagation so each message is
# logged exactly once, with our formatter.
logger.propagate = False

# CORS origin allow-list; "*" permits any origin.
origins = ["*"]
|
|
|
# Register CORS middleware for browser clients.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# refused by browsers for credentialed requests (the CORS spec forbids a
# wildcard origin with credentials) — confirm whether a concrete origin list
# is needed in production.
app.add_middleware(

    CORSMiddleware,

    allow_origins=origins,

    allow_credentials=True,

    allow_methods=["GET", "POST", "PUT", "DELETE"],

    allow_headers=["*"],

)



# Chain instances shared by all requests (constructed once at import time).
bot1 = Chatbot()  # stateless chain, serves /chat/v1/

bot2 = ChatbotMemory()  # history-aware chain, serves /chat/v2/



@app.get("/")
|
|
|
def read_root():
    """Landing endpoint: confirm the service is up and list the chat routes."""
    available = ["/chat/v1/", "/chat/v2/"]
    return {
        "message": "API running successfully",
        "endpoints": available,
    }
|
|
|
@app.post("/chat/v1/")
def chat(q: str):
    """Answer a single question via the stateless RAG chain (no history)."""
    logger.info(q)
    return {"answer": bot1.rag_chain.invoke(q)}
|
|
|
# Conversation history for /chat/v2/, kept for the lifetime of the process.
# BUG FIX: this used to be a local variable reset to [] on every request, so
# the chain always received an empty history and the endpoint had no memory.
# NOTE(review): the list is shared by all clients and grows without bound —
# confirm whether per-session histories / truncation are needed.
_chat_history: list = []


@app.post("/chat/v2/")
def chatMemory(q: str):
    """Answer a question via the history-aware RAG chain.

    The question and the chain's reply are appended to the module-level
    history so follow-up questions can reference earlier turns.
    """
    logger.info(q)
    ai_msg = bot2.rag_chain.invoke({"question": q, "chat_history": _chat_history})
    _chat_history.extend([HumanMessage(content=q), ai_msg])
    return {"answer": ai_msg}