Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -14,8 +14,7 @@ from neo4j import GraphDatabase
|
|
14 |
import networkx as nx
|
15 |
import pinecone
|
16 |
import os
|
17 |
-
|
18 |
-
os.system("pip install sentence-transformers")
|
19 |
|
20 |
# RAG Setup
|
21 |
text_path = r"./text_chunks.txt"
|
@@ -34,8 +33,6 @@ class CustomTextLoader(TextLoader):
|
|
34 |
text = f.read()
|
35 |
return [Document(page_content=text)]
|
36 |
|
37 |
-
from langchain.embeddings import HuggingFaceEmbeddings
|
38 |
-
|
39 |
embeddings = HuggingFaceEmbeddings()
|
40 |
|
41 |
from langchain.llms import HuggingFaceHub
|
@@ -69,21 +66,15 @@ rag_llm = ChatGroq(
|
|
69 |
groq_api_key=os.environ["GROQ_API_KEY"]  # SECURITY: a live key was committed here in plaintext — revoke it and load from the environment instead
|
70 |
)
|
71 |
|
72 |
-
from langchain import PromptTemplate
|
73 |
-
|
74 |
-
from langchain import PromptTemplate
|
75 |
-
|
76 |
template = """
|
77 |
You are a Thai rice assistant. The human will ask you questions about Thai rice.
|
78 |
-
Answer the question
|
79 |
Use following piece of context to answer the question.
|
80 |
If you don't know the answer, just say you don't know.
|
81 |
Keep the answer within 2 sentences and concise.
|
82 |
-
|
83 |
Context: {context}
|
84 |
Question: {question}
|
85 |
Answer:
|
86 |
-
|
87 |
"""
|
88 |
|
89 |
prompt = PromptTemplate(
|
@@ -102,8 +93,6 @@ class ChatBot():
|
|
102 |
loader = CustomTextLoader(r'./text_chunks.txt', encoding='utf-8')
|
103 |
documents = loader.load()
|
104 |
|
105 |
-
# The rest of the code here
|
106 |
-
|
107 |
rag_chain = (
|
108 |
{"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
|
109 |
| prompt
|
@@ -175,23 +164,31 @@ def compare_models(question):
|
|
175 |
graphrag_response = get_graphrag_response(question)
|
176 |
return rag_response, graphrag_response
|
177 |
|
178 |
-
def store_feedback(feedback, question, rag_response, graphrag_response):
|
179 |
-
print("Storing feedback...")
|
180 |
-
print(f"Question: {question}")
|
181 |
-
print(f"RAG Response: {rag_response}")
|
182 |
-
print(f"GraphRAG Response: {graphrag_response}")
|
183 |
-
print(f"User Feedback: {feedback}")
|
184 |
-
|
185 |
-
with open("feedback.txt", "a", encoding='utf-8') as f:
|
186 |
-
f.write(f"Question: {question}\n")
|
187 |
-
f.write(f"RAG Response: {rag_response}\n")
|
188 |
-
f.write(f"GraphRAG Response: {graphrag_response}\n")
|
189 |
-
f.write(f"User Feedback: {feedback}\n\n")
|
190 |
-
|
191 |
def handle_feedback(feedback, question, rag_response, graphrag_response):
|
192 |
store_feedback(feedback, question, rag_response, graphrag_response)
|
193 |
return "Feedback stored successfully!"
|
194 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
195 |
with gr.Blocks() as demo:
|
196 |
gr.Markdown("## Thai Rice Assistant A/B Testing")
|
197 |
|
@@ -209,6 +206,8 @@ with gr.Blocks() as demo:
|
|
209 |
choice = gr.Radio(["A is better", "B is better", "Tie", "Both Bad"], label="Which response is better?")
|
210 |
send_feedback_btn = gr.Button("Send Feedback")
|
211 |
|
|
|
|
|
212 |
def on_submit(question):
|
213 |
rag_response, graphrag_response = compare_models(question)
|
214 |
return rag_response, graphrag_response
|
@@ -220,6 +219,6 @@ with gr.Blocks() as demo:
|
|
220 |
return handle_feedback(feedback, question, rag_response, graphrag_response)
|
221 |
|
222 |
submit_btn.click(on_submit, inputs=[question_input], outputs=[rag_output, graphrag_output])
|
223 |
-
send_feedback_btn.click(on_feedback, inputs=[choice], outputs=[])
|
224 |
|
225 |
-
demo.launch(share=True)
|
|
|
14 |
import networkx as nx
|
15 |
import pinecone
|
16 |
import os
|
17 |
+
from datetime import datetime
|
|
|
18 |
|
19 |
# RAG Setup
|
20 |
text_path = r"./text_chunks.txt"
|
|
|
33 |
text = f.read()
|
34 |
return [Document(page_content=text)]
|
35 |
|
|
|
|
|
36 |
embeddings = HuggingFaceEmbeddings()
|
37 |
|
38 |
from langchain.llms import HuggingFaceHub
|
|
|
66 |
groq_api_key=os.environ["GROQ_API_KEY"]  # SECURITY: a live key was committed here in plaintext — revoke it and load from the environment instead
|
67 |
)
|
68 |
|
|
|
|
|
|
|
|
|
69 |
template = """
|
70 |
You are a Thai rice assistant. The human will ask you questions about Thai rice.
|
71 |
+
Answer the question only in the Thai language.
|
72 |
Use following piece of context to answer the question.
|
73 |
If you don't know the answer, just say you don't know.
|
74 |
Keep the answer within 2 sentences and concise.
|
|
|
75 |
Context: {context}
|
76 |
Question: {question}
|
77 |
Answer:
|
|
|
78 |
"""
|
79 |
|
80 |
prompt = PromptTemplate(
|
|
|
93 |
loader = CustomTextLoader(r'./text_chunks.txt', encoding='utf-8')
|
94 |
documents = loader.load()
|
95 |
|
|
|
|
|
96 |
rag_chain = (
|
97 |
{"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
|
98 |
| prompt
|
|
|
164 |
graphrag_response = get_graphrag_response(question)
|
165 |
return rag_response, graphrag_response
|
166 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
167 |
def handle_feedback(feedback, question, rag_response, graphrag_response):
    """Store the reviewer's verdict and return a status message for the UI."""
    record = (feedback, question, rag_response, graphrag_response)
    store_feedback(*record)
    return "Feedback stored successfully!"
|
170 |
|
171 |
+
def store_feedback(feedback, question, rag_response, graphrag_response):
    """Persist one A/B-test feedback record to a timestamped file.

    Parameters
    ----------
    feedback : str
        The user's verdict (e.g. "A is better", "Tie").
    question : str
        The question that was asked.
    rag_response : str
        Answer produced by the plain RAG pipeline.
    graphrag_response : str
        Answer produced by the GraphRAG pipeline.
    """
    # Ensure the feedback directory exists before writing into it.
    os.makedirs('feedback', exist_ok=True)

    # Generate a unique filename based on the current timestamp.
    # NOTE(review): second-level resolution means two submissions within the
    # same second overwrite each other — acceptable for manual A/B testing.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f'feedback/feedback_{timestamp}.txt'

    # Prepare the record to be written.
    content = f"Timestamp: {datetime.now()}\n"
    content += f"Question: {question}\n"
    content += f"RAG Response: {rag_response}\n"
    content += f"GraphRAG Response: {graphrag_response}\n"
    content += f"User Feedback: {feedback}\n\n"

    # Write the feedback to a new file.
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(content)

    # BUG FIX: the original printed the literal text "(unknown)" — report the
    # actual path so the stored record can be located.
    print(f"Feedback stored in {filename}")
|
191 |
+
|
192 |
with gr.Blocks() as demo:
|
193 |
gr.Markdown("## Thai Rice Assistant A/B Testing")
|
194 |
|
|
|
206 |
choice = gr.Radio(["A is better", "B is better", "Tie", "Both Bad"], label="Which response is better?")
|
207 |
send_feedback_btn = gr.Button("Send Feedback")
|
208 |
|
209 |
+
feedback_output = gr.Textbox(label="Feedback Status", interactive=False)
|
210 |
+
|
211 |
def on_submit(question):
    """Run both pipelines on *question*; returns (rag, graphrag) answers."""
    # compare_models already yields the 2-tuple in output order.
    return compare_models(question)
|
|
|
219 |
return handle_feedback(feedback, question, rag_response, graphrag_response)
|
220 |
|
221 |
submit_btn.click(on_submit, inputs=[question_input], outputs=[rag_output, graphrag_output])
|
222 |
+
send_feedback_btn.click(on_feedback, inputs=[choice], outputs=[feedback_output])
|
223 |
|
224 |
+
demo.launch(share=True)
|