srinuksv committed on
Commit
ca06540
1 Parent(s): d776cdd

Update app.py

Files changed (1)
  1. app.py +57 -10
app.py CHANGED
@@ -1,15 +1,19 @@
 import os
 import time
-from fastapi import FastAPI
+from fastapi import FastAPI,Request
 from fastapi.responses import HTMLResponse
 from fastapi.staticfiles import StaticFiles
 from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
 from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from pydantic import BaseModel
+from fastapi.responses import JSONResponse
+import uuid # for generating unique IDs
 import datetime
-from dotenv import load_dotenv
-load_dotenv()
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.templating import Jinja2Templates
+
+
 # Define Pydantic model for incoming request body
 class MessageRequest(BaseModel):
     message: str
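
Review note: with load_dotenv() removed, HF_TOKEN must now come from the real process environment (for example a Space secret). If it is unset, the assignment os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN") a few lines below will raise TypeError, because os.environ only accepts strings. A minimal defensive sketch, not part of this commit; the error message is illustrative:

import os

token = os.getenv("HF_TOKEN")
if token is None:
    # os.environ["HF_TOKEN"] = None raises TypeError: str expected, not NoneType
    raise RuntimeError("HF_TOKEN is not set; add it as a Space secret or export it")
os.environ["HF_TOKEN"] = token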
@@ -18,9 +22,34 @@ class MessageRequest(BaseModel):
 os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")
 app = FastAPI()

-app.mount("/static", StaticFiles(directory="static"), name="static")
+@app.middleware("http")
+async def add_security_headers(request: Request, call_next):
+    response = await call_next(request)
+    response.headers["Content-Security-Policy"] = "frame-ancestors *; frame-src *; object-src *;"
+    response.headers["X-Frame-Options"] = "ALLOWALL"
+    return response
+
+
+# Allow CORS requests from any domain
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+


+@app.get("/favicon.ico")
+async def favicon():
+    return HTMLResponse("") # or serve a real favicon if you have one
+
+
+app.mount("/static", StaticFiles(directory="static"), name="static")
+
+templates = Jinja2Templates(directory="static")
 # Configure Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(
     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
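
Review note: ALLOWALL is not a standard X-Frame-Options value (the spec defines only DENY, SAMEORIGIN, and ALLOW-FROM); browsers ignore unrecognized values, and it is the frame-ancestors * directive in the CSP that actually permits embedding in modern browsers. Note also that the CORS spec disallows credentials with a wildcard origin, so allow_credentials=True combined with allow_origins=["*"] may not behave as expected in every browser. A minimal sketch to verify what the new middleware sends, assuming fastapi and httpx are installed (TestClient depends on httpx); the /ping route is illustrative, not part of the commit:

from fastapi import FastAPI, Request
from fastapi.testclient import TestClient

app = FastAPI()

@app.middleware("http")
async def add_security_headers(request: Request, call_next):
    # Same headers as the commit: allow the page to be framed anywhere.
    response = await call_next(request)
    response.headers["Content-Security-Policy"] = "frame-ancestors *; frame-src *; object-src *;"
    response.headers["X-Frame-Options"] = "ALLOWALL"
    return response

@app.get("/ping")
async def ping():
    return {"ok": True}

client = TestClient(app)
headers = client.get("/ping").headers
print(headers["Content-Security-Policy"])  # frame-ancestors *; frame-src *; object-src *;
print(headers["X-Frame-Options"])          # ALLOWALL (non-standard; browsers ignore it)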
@@ -89,10 +118,27 @@ def handle_query(query):
         response ="Sorry, I couldn't find an answer."
     current_chat_history.append((query, response))
     return response
-@app.get("/", response_class=HTMLResponse)
-async def read_root():
-    with open("static/index.html") as f:
-        return f.read()
+@app.get("/ch/{id}", response_class=HTMLResponse)
+async def load_chat(request: Request, id: str):
+    return templates.TemplateResponse("index.html", {"request": request, "user_id": id})
+# Route to save chat history
+@app.post("/hist/")
+async def save_chat_history(history: dict):
+    # Logic to save chat history, using the `id` from the frontend
+    print(history) # You can replace this with actual save logic
+    return {"message": "Chat history saved"}
+@app.post("/webhook")
+async def receive_form_data(request: Request):
+    form_data = await request.json()
+
+    # Generate a unique ID (for tracking user)
+    unique_id = str(uuid.uuid4())
+
+    # Here you can do something with form_data like saving it to a database
+    print("Received form data:", form_data)
+
+    # Send back the unique id to the frontend
+    return JSONResponse({"id": unique_id})

 @app.post("/chat/")
 async def chat(request: MessageRequest):
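
Review note: the three new routes form a single per-user flow. The frontend POSTs form data to /webhook and receives a uuid4 id, loads /ch/{id} (which renders static/index.html through Jinja2 with user_id in the template context, so the jinja2 package must be installed), and can POST the transcript to /hist/, which for now only prints it server-side. A hedged usage sketch, assuming the requests package and a server on localhost:7860 (the usual Spaces port; neither assumption comes from the commit):

import requests

BASE = "http://localhost:7860"  # assumption: adjust to your deployment

# 1. Submit form data; the server answers with a uuid4 tracking id.
r = requests.post(f"{BASE}/webhook", json={"name": "Alice", "email": "alice@example.com"})
user_id = r.json()["id"]

# 2. Load the chat page; index.html is rendered with user_id available to the template.
page = requests.get(f"{BASE}/ch/{user_id}")
print(page.status_code)  # 200 if static/index.html exists

# 3. Persist the chat history (currently only printed on the server).
requests.post(f"{BASE}/hist/", json={"id": user_id, "messages": []})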
@@ -106,5 +152,6 @@ async def chat(request: MessageRequest):
     }
     chat_history.append(message_data)
     return {"response": response}
-
-
+@app.get("/")
+def read_root():
+    return {"message": "Welcome to the API"}
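
Review note: the root route changes behavior, not just implementation. GET / used to return the raw contents of static/index.html; after this commit it returns a JSON greeting, and the HTML UI is reached via /ch/{id} instead, so anything embedding the Space at / will now see JSON. A quick check under the same localhost assumption as above:

import requests
print(requests.get("http://localhost:7860/").json())  # {'message': 'Welcome to the API'}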