Gampanut committed on
Commit
6919d03
1 Parent(s): 34bb96a

Update app.py

Files changed (1)
  1. app.py +22 -49
app.py CHANGED
@@ -34,6 +34,14 @@ def store_feedback_in_sheet(feedback, question, rag_response, graphrag_response)
     row = [timestamp, question, rag_response, graphrag_response, feedback]
     sheet.append_row(row)

+def load_data():
+    data = sheet.get_all_records()
+    return data[-10:], len(data)
+
+def add_review(question, rag_response, graphrag_response, feedback):
+    store_feedback_in_sheet(feedback, question, rag_response, graphrag_response)
+    return load_data()
+
 # RAG Setup
 text_path = r"./text_chunks.txt"
 loader = TextLoader(text_path, encoding='utf-8')
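
Note (not part of this commit): the new load_data() and add_review() helpers assume a gspread worksheet object named sheet, created earlier in app.py. A minimal sketch of that setup, with the credentials file and spreadsheet name as placeholder assumptions:

# Sketch only -- the credentials file and spreadsheet name are assumptions,
# not taken from this commit; the app may use a different auth flow.
import gspread

client = gspread.service_account(filename="credentials.json")
sheet = client.open("feedback").sheet1   # worksheet the helpers write to and read from

# sheet.get_all_records() returns a list of dicts keyed by the header row,
# so load_data() returns the 10 most recent feedback rows plus the total count.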
@@ -41,16 +49,6 @@ documents = loader.load()
 text_splitter = CharacterTextSplitter(chunk_size=3000, chunk_overlap=4)
 docs = text_splitter.split_documents(documents)

-class CustomTextLoader(TextLoader):
-    def __init__(self, file_path: str, encoding: str = 'utf-8'):
-        super().__init__(file_path)
-        self.encoding = encoding
-
-    def load(self):
-        with open(self.file_path, encoding=self.encoding) as f:
-            text = f.read()
-        return [Document(page_content=text)]
-
 embeddings = HuggingFaceEmbeddings()

 from langchain.llms import HuggingFaceHub
@@ -107,17 +105,6 @@ rag_chain = (
     | StrOutputParser()
 )

-class ChatBot():
-    loader = CustomTextLoader(r"./text_chunks.txt", encoding='utf-8')
-    documents = loader.load()
-
-    rag_chain = (
-        {"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
-        | prompt
-        | llm
-        | StrOutputParser()
-    )
-
 graphrag_llm = ChatGroq(
     model="Llama3-8b-8192",
     temperature=0,
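
Note (not part of this commit): the rag_chain kept above follows the standard LCEL retrieval pattern. A self-contained sketch of that pattern, using FAISS, a toy document, and ChatGroq as stand-ins for the app's actual docsearch, prompt, and llm objects:

# Sketch only -- FAISS, the sample text, the prompt, and the LLM choice are
# illustrative stand-ins, not the app's actual objects.
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_groq import ChatGroq

embeddings = HuggingFaceEmbeddings()
docsearch = FAISS.from_texts(["Jasmine rice is grown mainly in Thailand."], embeddings)
prompt = ChatPromptTemplate.from_template(
    "Answer from this context only:\n{context}\n\nQuestion: {question}"
)
llm = ChatGroq(model="Llama3-8b-8192", temperature=0)  # needs GROQ_API_KEY

rag_chain = (
    {"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

print(rag_chain.invoke("Where is jasmine rice grown?"))  # plain string answer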
@@ -182,43 +169,29 @@ def compare_models(question):
     graphrag_response = get_graphrag_response(question)
     return rag_response, graphrag_response

-def handle_feedback(feedback, question, rag_response, graphrag_response):
-    try:
-        store_feedback_in_sheet(feedback, question, rag_response, graphrag_response)
-        return "ส่งสำเร็จ!"
-    except Exception as e:
-        return f"Error: {e}"
-
+# Gradio Interface
 with gr.Blocks() as demo:
-    gr.Markdown("## Thai Rice Assistant A/B Testing")
-
     with gr.Row():
         with gr.Column():
-            question_input = gr.Textbox(label="ถามคำถามเกี่ยวกับข้าว:")
-            submit_btn = gr.Button("ถาม")
-
-        with gr.Column():
+            question_input = gr.Textbox(label="ถามคำถามเกี่ยวกับข้าว:", placeholder="Enter your question about Thai rice")
+            submit_btn = gr.Button(value="ถาม")
             rag_output = gr.Textbox(label="Model A", interactive=False)
             graphrag_output = gr.Textbox(label="Model B", interactive=False)
+            feedback = gr.Radio(label="Which response is better?", choices=["A ดีกว่า", "B ดีกว่า", "เท่ากัน", "แย่ทั้งคู่"])
+            comments = gr.Textbox(label="Comments", lines=10, placeholder="Any additional comments?")
+            submit_feedback = gr.Button(value="Submit Feedback")

-    with gr.Row():
         with gr.Column():
-            choice = gr.Radio(["A ดีกว่า", "B ดีกว่า", "เท่ากัน", "แย่ทั้งคู่"], label="คำตอบไหนดีกว่ากัน?")
-            send_feedback_btn = gr.Button("ส่ง")
-
-            feedback_output = gr.Textbox(label="Feedback Status", interactive=False)
+            data = gr.Dataframe(label="Most recently created 10 rows")
+            count = gr.Number(label="Total number of reviews")

-    def on_submit(question):
-        rag_response, graphrag_response = compare_models(question)
-        return rag_response, graphrag_response
+    # Function to handle question submission and display responses
+    submit_btn.click(fn=compare_models, inputs=[question_input], outputs=[rag_output, graphrag_output])

-    def on_feedback(feedback):
-        question = question_input.value
-        rag_response = rag_output.value
-        graphrag_response = graphrag_output.value
-        return handle_feedback(feedback, question, rag_response, graphrag_response)
+    # Function to handle feedback submission and update data display
+    submit_feedback.click(fn=add_review, inputs=[question_input, rag_output, graphrag_output, feedback], outputs=[data, count])

-    submit_btn.click(on_submit, inputs=[question_input], outputs=[rag_output, graphrag_output])
-    send_feedback_btn.click(on_feedback, inputs=[choice], outputs=[feedback_output])
+    # Load initial data display
+    demo.load(fn=load_data, inputs=None, outputs=[data, count])

 demo.launch(share=True)
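
Note (not part of this commit): the new event wiring relies on Gradio unpacking a returned tuple across the components listed in outputs, and on demo.load() running once when the page opens. A minimal self-contained sketch of that pattern:

# Sketch only -- stand-in data; the real app returns sheet rows from load_data().
import gradio as gr

def fake_load_data():
    rows = [["2024-01-01", "question", "answer A", "answer B", "A ดีกว่า"]]
    return rows, len(rows)          # tuple -> unpacked into [table, total]

with gr.Blocks() as sketch:
    table = gr.Dataframe(label="Most recently created 10 rows")
    total = gr.Number(label="Total number of reviews")
    refresh = gr.Button("Refresh")

    refresh.click(fn=fake_load_data, inputs=None, outputs=[table, total])
    sketch.load(fn=fake_load_data, inputs=None, outputs=[table, total])   # runs at page load

sketch.launch()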
 