budhadityac24 committed
Commit 6aed8e6
1 Parent(s): 7df4de5

Update app.py

Files changed (1): app.py (+106 -67)
app.py CHANGED
@@ -147,7 +147,32 @@ def question_create(json_template):
     answer = ""
     for chunk in completion:
         answer += chunk.choices[0].delta.content or ""
-    return answer
+
+
+    client = Groq()
+    completion = client.chat.completions.create(
+        model="llama-3.1-70b-versatile",
+        messages=[
+            {
+                "role": "system",
+                "content": "You are an experienced writer. You will be given an array of questions. \nSome questions will ask to upload images. Ignore any of these type of questions.\nSome questions ask about different identities or descriptions of the same thing. I want you to merge the questions so as to ask input from them once.\nConvert all questions so that more of a professional but also a bit of a funny tone is maintained. \nRETURN AN ARRAY OF THE QUESTIONS ONLY. DO NOT RETURN ANYTHING ELSE. "
+            },
+            {
+                "role": "user",
+                "content": answer
+            }
+        ],
+        temperature=0.73,
+        max_tokens=5840,
+        top_p=1,
+        stream=True,
+        stop=None,
+    )
+    final=""
+    for chunk in completion:
+        final+=chunk.choices[0].delta.content or ""
+
+    return final
 
 def answer_refill(questions,answers,obs_json_template,bizobj_json_template):
 
@@ -190,7 +215,7 @@ def answer_refill(questions,answers,obs_json_template,bizobj_json_template):
             }
         ],
         temperature=1,
-        max_tokens=4610,
+        max_tokens=7610,
         top_p=1,
         stream=True,
         stop=None,
@@ -218,8 +243,8 @@ def executive_summary(json_template):
                 "content": str(json_template)
             }
         ],
-        temperature=1,
-        max_tokens=4610,
+        temperature=0.73,
+        max_tokens=5610,
         top_p=1,
         stream=True,
         stop=None,
@@ -229,11 +254,26 @@ def executive_summary(json_template):
         final_summ+=chunk.choices[0].delta.content or ""
     return final_summ
 
-# Streamlit application
+
 def main():
     st.title("Qualitas Sales Data Collection Chatbot")
     st.caption("Welcome to the Qualitas Bot. First upload a PDF document which should be customer correspondence, detailing some requirements. Also sometimes the Submit button for the questions is a bit sticky. So You might have to click it twice!")
+
     # Initialize session state variables
+    init_session_state()
+
+    # File uploader for the PDF
+    uploaded_file = st.file_uploader("Upload a PDF document", type="pdf")
+    if uploaded_file is not None and not st.session_state.file_processed:
+        st.write("Processing your document...")
+        process_document(uploaded_file)
+        # Display the first question immediately after processing the document
+        show_question()
+
+    # Simulate chat interaction
+    chat_interaction()
+
+def init_session_state():
     if "file_processed" not in st.session_state:
         st.session_state.file_processed = False
     if "questionnaire_started" not in st.session_state:
@@ -247,72 +287,71 @@ def main():
     if "questionnaire_complete" not in st.session_state:
         st.session_state.questionnaire_complete = False
 
-    # File uploader for the PDF
-    uploaded_file = st.file_uploader("Upload a PDF document", type="pdf")
-    answers=[]
-    final_bizobj_json=[]
-    bizobj=[]
-    obs=[]
-    final_obs_json=[]
-    if uploaded_file is not None and not st.session_state.file_processed:
-        st.write("Processing your document...")
-
-        # Simulate file processing (replace with actual logic)
-        st.session_state.text = extract_text_from_pdf(uploaded_file)
-        st.session_state.classification_result = classification_LLM(st.session_state.text)
-        json_path='observationsJSON.json'
-        with open(json_path, 'r') as file:
-            obs_json_template = json.load(file)
-        final_obs_json=obsjsoncreate(obs_json_template,st.session_state.classification_result,st.session_state.text)
-        st.session_state.obs=final_obs_json
-        json_path='BizObjJSON.json'
-        with open(json_path, 'r') as file:
-            bizobj_json_template = json.load(file)
-        final_bizobj_json=bizobjjsoncreate(bizobj_json_template,st.session_state.text)
-        st.session_state.bizobj=final_bizobj_json
-        questionobs=question_create(final_obs_json)
-        questionbizobj=question_create(final_bizobj_json)
-        st.session_state.questions = ast.literal_eval(questionbizobj) + ast.literal_eval(questionobs)
-        # st.write(st.session_state.questions)
-
-        # Mark file as processed
-        st.session_state.file_processed = True
-        st.success("Document processed successfully.")
-
-    def show_question():
-        if st.session_state.current_question_index < len(st.session_state.questions):
-            st.write(st.session_state.questions[st.session_state.current_question_index])
-            user_input = st.text_input("Your answer:", key=f"input_{st.session_state.current_question_index}")
-            submit_button = st.button("Submit", key=f"submit_{st.session_state.current_question_index}")
-
-            if submit_button:
-                # Store the answer
-                st.session_state.messages.append({"role": "user", "content": user_input})
-                # Move to the next question
-                st.session_state.current_question_index += 1
-
-                # Check if all questions have been answered
-                if st.session_state.current_question_index >= len(st.session_state.questions):
-                    st.session_state.questionnaire_complete = True
-
-    # Main loop to control the flow of the questionnaire
-    if not st.session_state.questionnaire_complete:
+def process_document(uploaded_file):
+    # Simulate file processing (replace with actual logic)
+    st.session_state.text = extract_text_from_pdf(uploaded_file)
+    st.session_state.classification_result = classification_LLM(st.session_state.text)
+    json_path='observationsJSON.json'
+    with open(json_path, 'r') as file:
+        obs_json_template = json.load(file)
+    final_obs_json = obsjsoncreate(obs_json_template, st.session_state.classification_result, st.session_state.text)
+    st.session_state.obs = final_obs_json
+    json_path='BizObjJSON.json'
+    with open(json_path, 'r') as file:
+        bizobj_json_template = json.load(file)
+    final_bizobj_json = bizobjjsoncreate(bizobj_json_template, st.session_state.text)
+    st.session_state.bizobj = final_bizobj_json
+    questionobs = question_create(final_obs_json)
+    questionbizobj = question_create(final_bizobj_json)
+    st.session_state.questions = ast.literal_eval(questionbizobj) + ast.literal_eval(questionobs)
+    st.write(st.session_state.questions)
+    # Mark file as processed
+    st.session_state.file_processed = True
+    st.success("Document processed successfully.")
+
+def chat_interaction():
+    # Display chat messages from history in the correct order
+    for message in st.session_state.messages[::-1]:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # React to user input
+    if prompt := st.chat_input("What is your question?"):
+        # Display user message in chat message container
+        st.chat_message("user").markdown(prompt)
+
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+
+        # Process the user's input and show the next question
         show_question()
+
+def show_question():
+    if st.session_state.current_question_index < len(st.session_state.questions):
+        # Display the next question
+        with st.chat_message("assistant"):
+            st.markdown(st.session_state.questions[st.session_state.current_question_index])
+
+        # Add the question to the chat history
+        st.session_state.messages.append({"role": "assistant", "content": st.session_state.questions[st.session_state.current_question_index]})
+
+        # Move to the next question
+        st.session_state.current_question_index += 1
+
+        # Check if all questions have been answered
+        if st.session_state.current_question_index >= len(st.session_state.questions):
+            st.session_state.questionnaire_complete = True
+
     else:
         # Display the answers after completing the questionnaire
         answers = [message["content"] for message in st.session_state.messages if message["role"] == "user"]
-        # st.write(st.session_state.questions)
-        # st.write(answers)
-        # st.subheader("Answers")
-        # st.write(answers)
-        if st.session_state.questionnaire_complete:
-            completed_json=answer_refill(st.session_state.questions,answers,st.session_state.obs,st.session_state.bizobj)
-            # st.write(completed_json)
-            exec_summ=executive_summary(completed_json)
+        # with st.chat_message("assistant"):
+        #     st.subheader("Answers")
+        #     st.write(answers)
+
+        completed_json = answer_refill(st.session_state.questions, answers, st.session_state.obs, st.session_state.bizobj)
+        exec_summ = executive_summary(completed_json)
         st.write(exec_summ)
-
 
-
-
 if __name__ == "__main__":
-    main()
+    main()
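
For reference, below is a minimal standalone sketch of the call-and-accumulate streaming pattern that the updated question_create, answer_refill, and executive_summary functions all share. It assumes the groq Python SDK is installed and GROQ_API_KEY is exported; the prompt text is illustrative only, not the app's real prompt.

# Minimal sketch: stream a Groq chat completion and accumulate the chunks.
# Assumes the `groq` SDK is installed and GROQ_API_KEY is set in the environment.
from groq import Groq

client = Groq()
completion = client.chat.completions.create(
    model="llama-3.1-70b-versatile",
    messages=[
        {"role": "system", "content": "You are an experienced writer."},  # illustrative prompt
        {"role": "user", "content": "Rewrite these questions in a professional tone."},
    ],
    temperature=0.73,
    top_p=1,
    stream=True,  # the response arrives as a sequence of chunks
    stop=None,
)

answer = ""
for chunk in completion:
    # Each chunk carries an incremental delta; it can be None, hence the `or ""`.
    answer += chunk.choices[0].delta.content or ""
print(answer)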