Zwea Htet committed on
Commit
23a229c
1 Parent(s): 91caeb5

updated chatbot ui in streamlit

Browse files
Files changed (3) hide show
  1. app.py +55 -17
  2. requirements.txt +0 -1
  3. utils/util.py +6 -12
app.py CHANGED
@@ -1,13 +1,18 @@
 
 
1
  import os
 
2
 
3
  import openai
4
  import requests
5
  import streamlit as st
6
- from streamlit_chat import message
7
 
8
  from models import bloom
9
  from utils.util import *
10
 
 
 
 
11
  st.title("Welcome to RegBotBeta")
12
  st.header("Powered by `LlamaIndex🦙` and `OpenAI API`")
13
 
@@ -30,19 +35,52 @@ if api_key:
30
 
31
  st.write("---")
32
  if index:
33
- input_text = st.text_area("Ask your question")
34
-
35
- if input_text:
36
- if st.button("Ask"):
37
- st.session_state.messages.append(("User", input_text))
38
- with st.spinner("Processing your query..."):
39
- bot_response = get_response(index, input_text)
40
- print("bot: ", bot_response)
41
- st.session_state.messages.append(("Bot", bot_response))
42
-
43
- # Display previous messages
44
- msg_key = 0
45
- for sender, msg in st.session_state.messages[::-1]:
46
- is_user = sender == "User"
47
- message(str(msg), is_user, key=str(msg_key) + f"_{sender}")
48
- msg_key += 1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
2
+
3
  import os
4
+ import time
5
 
6
  import openai
7
  import requests
8
  import streamlit as st
 
9
 
10
  from models import bloom
11
  from utils.util import *
12
 
13
+ # from streamlit_chat import message
14
+
15
+
16
  st.title("Welcome to RegBotBeta")
17
  st.header("Powered by `LlamaIndex🦙` and `OpenAI API`")
18
 
 
35
 
36
  st.write("---")
37
if index:
    # The history is read below before anything appends to it; make sure the
    # key exists even if the initialization elsewhere in the file was skipped.
    st.session_state.setdefault("messages", [])

    # Replay the chat history on every Streamlit rerun so the conversation
    # persists across interactions.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # chat_input returns None until the user submits something.
    if prompt := st.chat_input("Say something"):
        # Echo the user's message in a chat bubble.
        st.chat_message("user").markdown(prompt)

        # Record it in the history.
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.spinner("Processing your query..."):
            bot_response = get_response(index, prompt)

        print("bot: ", bot_response)

        # Render the assistant's reply, streamed word by word.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            # Simulate the chatbot "typing" its response: the placeholder
            # must be updated INSIDE the loop, otherwise the sleeps pass with
            # no visible streaming effect.
            for chunk in bot_response.split():
                full_response += chunk + " "
                time.sleep(0.05)
                # add a blinking cursor to simulate typing
                message_placeholder.markdown(full_response + "▌")

            # Final render without the cursor.
            message_placeholder.markdown(full_response)

        # Record the assistant's reply in the history.
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )
requirements.txt CHANGED
@@ -8,5 +8,4 @@ openai
8
  faiss-cpu
9
  python-dotenv
10
  streamlit
11
- streamlit-chat
12
  huggingface_hub
 
8
  faiss-cpu
9
  python-dotenv
10
  streamlit
 
11
  huggingface_hub
utils/util.py CHANGED
@@ -5,29 +5,23 @@ def validate(token: str):
5
  api_endpoint = "https://api.openai.com/v1/chat/completions"
6
  api_key = token
7
 
8
- headers = {
9
- "Content-Type" : "application/json",
10
- "Authorization": f"Bearer {api_key}"
11
- }
12
 
13
- messages = [
14
- {"role": "user", "content": "Say this is a test!"}
15
- ]
16
 
17
- data = {
18
- "model": "gpt-3.5-turbo",
19
- "messages": messages
20
- }
21
 
22
  response = requests.post(api_endpoint, json=data, headers=headers)
23
  return response
24
 
 
25
  def create_index(model):
26
  index = model.initialize_index("bloomLlama")
27
  return index
28
 
 
29
  def get_response(vector_index, query_str):
30
  print("query_str: ", query_str)
31
  query_engine = vector_index.as_query_engine()
32
  response = query_engine.query(query_str)
33
- return response
 
5
  api_endpoint = "https://api.openai.com/v1/chat/completions"
6
  api_key = token
7
 
8
+ headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
 
 
 
9
 
10
+ messages = [{"role": "user", "content": "Say this is a test!"}]
 
 
11
 
12
+ data = {"model": "gpt-3.5-turbo", "messages": messages}
 
 
 
13
 
14
  response = requests.post(api_endpoint, json=data, headers=headers)
15
  return response
16
 
17
+
18
def create_index(model):
    """Build and return the "bloomLlama" vector index from *model*.

    Thin wrapper around the model's own index initialization; returns
    whatever ``initialize_index`` produces unchanged.
    """
    return model.initialize_index("bloomLlama")
21
 
22
+
23
def get_response(vector_index, query_str):
    """Run *query_str* against *vector_index* and return the answer as text.

    A fresh query engine is created per call; the engine's response object
    is coerced to ``str`` so callers always receive plain text.
    """
    print("query_str: ", query_str)
    engine = vector_index.as_query_engine()
    return str(engine.query(query_str))