srinuksv committed on
Commit 4ddf4f2
1 Parent(s): da78af2

Upload 3 files

Files changed (3)
  1. Dockerfile +13 -0
  2. app.py +108 -0
  3. requirements.txt +6 -0
Dockerfile ADDED
@@ -0,0 +1,13 @@
+ FROM python:3.12.5
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,108 @@
+ import os
+ import time
+ import datetime
+ from fastapi import FastAPI
+ from fastapi.responses import HTMLResponse
+ from fastapi.staticfiles import StaticFiles
+ from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
+ from llama_index.llms.huggingface import HuggingFaceInferenceAPI
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ from pydantic import BaseModel
+
+ # Pydantic model for the incoming request body
+ class MessageRequest(BaseModel):
+     message: str
+
+ os.environ["HF_TOKEN"] = ""  # placeholder: the token was removed from the commit
+ app = FastAPI()
+
+ # Serve static assets from the repository's static/ directory
+ # (a relative path, so it resolves inside the container's WORKDIR /app)
+ app.mount("/static", StaticFiles(directory="static"), name="static")
+
+ # Configure LlamaIndex settings
+ Settings.llm = HuggingFaceInferenceAPI(
+     model_name="meta-llama/Meta-Llama-3-8B-Instruct",
+     tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
+     context_window=3000,
+     token="",  # placeholder: the token was removed from the commit
+     max_new_tokens=512,
+     generate_kwargs={"temperature": 0.1},
+ )
+ Settings.embed_model = HuggingFaceEmbedding(
+     model_name="BAAI/bge-small-en-v1.5"
+ )
+
+ PERSIST_DIR = "db"
+ PDF_DIRECTORY = "data"
+
+ # Ensure directories exist
+ os.makedirs(PDF_DIRECTORY, exist_ok=True)
+ os.makedirs(PERSIST_DIR, exist_ok=True)
+
+ chat_history = []
+ current_chat_history = []
+
+ def data_ingestion_from_directory():
+     # Index every document under PDF_DIRECTORY and persist the index to disk.
+     documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
+     index = VectorStoreIndex.from_documents(documents)
+     index.storage_context.persist(persist_dir=PERSIST_DIR)
+
+ def initialize():
+     start_time = time.time()
+     data_ingestion_from_directory()  # Process PDF ingestion at startup
+     print(f"Data ingestion time: {time.time() - start_time} seconds")
+
+ initialize()  # Run initialization tasks
+
+ def handle_query(query):
+     chat_text_qa_msgs = [
+         (
+             "user",
+             """
+             You are the Clara Redfernstech chatbot. Your goal is to provide accurate, professional, and helpful answers to user queries based on the company's data. Always ensure your responses are clear and concise. Give your response within 10-15 words only.
+             {context_str}
+             Question:
+             {query_str}
+             """,
+         )
+     ]
+     text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
+
+     storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
+     index = load_index_from_storage(storage_context)
+
+     # Build a running-context string from past turns, most recent first.
+     context_str = ""
+     for past_query, response in reversed(current_chat_history):
+         if past_query.strip():
+             context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
+
+     # Note: the {context_str} slot in the template is filled with retrieved
+     # document text by the query engine; the chat-history string is passed
+     # through as an extra keyword argument.
+     query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
+     answer = query_engine.query(query)
+
+     if hasattr(answer, "response"):
+         response = answer.response
+     elif isinstance(answer, dict) and "response" in answer:
+         response = answer["response"]
+     else:
+         response = "Sorry, I couldn't find an answer."
+     current_chat_history.append((query, response))
+     return response
+
+ @app.get("/", response_class=HTMLResponse)
+ async def read_root():
+     with open("static/index.html") as f:
+         return f.read()
+
+ @app.post("/chat/")
+ async def chat(request: MessageRequest):
+     message = request.message  # Access the message from the request body
+     response = handle_query(message)  # Process the message
+     message_data = {
+         "sender": "User",
+         "message": message,
+         "response": response,
+         "timestamp": datetime.datetime.now().isoformat(),
+     }
+     chat_history.append(message_data)
+     return {"response": response}
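A minimal client sketch for the /chat/ endpoint above, assuming the app is running locally on the port the Dockerfile exposes (7860); the sample question is illustrative only:

    import json
    import urllib.request

    # Send one message to the /chat/ endpoint and print the bot's reply.
    payload = json.dumps({"message": "What services does Redfernstech offer?"}).encode("utf-8")
    req = urllib.request.Request(
        "http://localhost:7860/chat/",
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp)["response"])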
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ python-dotenv
+ llama-index
+ llama-index-embeddings-huggingface
+ llama-index-llms-huggingface
+ uvicorn
+ fastapi
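Note that none of these requirements are pinned, so a fresh image build may pull newer, potentially incompatible releases of llama-index and its plugins; pinning known-good versions (for example via pip freeze) would make the Docker build reproducible.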