import time

import streamlit as st


def display_chat_history(model_name: str):
    for message in st.session_state[model_name]:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])


def chat_input(model_name: str):
    if prompt := st.chat_input("Say something"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state[model_name].append({"role": "user", "content": prompt})
        return prompt
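

# Minimal initialization sketch: the helpers in this module assume that
# st.session_state[model_name] and st.session_state.messages already exist as
# lists. A helper like the one below could be called once per rerun before
# rendering the chat UI; the name `init_chat_state` is illustrative, not part
# of the original module.
def init_chat_state(model_name: str):
    # Per-model history read by display_chat_history / chat_input / display_bot_msg
    if model_name not in st.session_state:
        st.session_state[model_name] = []
    # Shared history read by chatbox
    if "messages" not in st.session_state:
        st.session_state.messages = []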


def display_bot_msg(model_name: str, bot_response: str):
    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        # simulate the chatbot "thinking" before responding
        # (or stream its response)
        for chunk in bot_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            # add a blinking cursor to simulate typing
            message_placeholder.markdown(full_response + "▌")
        message_placeholder.markdown(full_response)

    # Add assistant response to chat history
    st.session_state[model_name].append(
        {"model_name": model_name, "role": "assistant", "content": full_response}
    )
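

# Usage sketch for the two-step helpers above, assuming a model object that
# exposes a get_response(prompt) method (the same interface chatbox() below
# relies on). The names `render_model_tab` and `model` are illustrative.
def render_model_tab(model_name: str, model):
    # Replay the stored conversation, then handle one new turn for this model
    display_chat_history(model_name)
    if prompt := chat_input(model_name):
        with st.spinner("Processing your query..."):
            bot_response = model.get_response(prompt)
        display_bot_msg(model_name, bot_response)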


def chatbox(model_name: str, model=None):
    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        if message["model_name"] == model_name:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

    if prompt := st.chat_input("Say something"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)
        # Add user message to chat history
        st.session_state.messages.append(
            {"model_name": model_name, "role": "user", "content": prompt}
        )

        with st.spinner("Processing your query..."):
            bot_response = model.get_response(prompt)
        print("bot: ", bot_response)

        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            # simulate the chatbot "thinking" before responding
            # (or stream its response)
            for chunk in bot_response.split():
                full_response += chunk + " "
                time.sleep(0.05)
                # add a blinking cursor to simulate typing
                message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)

        # Add assistant response to chat history
        st.session_state.messages.append(
            {"model_name": model_name, "role": "assistant", "content": full_response}
        )
        # Scroll to the bottom of the chat container
        # st.markdown(
        #     """
        #     <script>
        #     const chatContainer = document.getElementsByClassName("css-1n76uvr")[0];
        #     chatContainer.scrollTop = chatContainer.scrollHeight;
        #     </script>
        #     """,
        #     unsafe_allow_html=True,
        # )
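

# End-to-end wiring sketch: a dummy model with a get_response(prompt) method
# stands in for whatever model object the real app passes to chatbox(). The
# class EchoModel and the model name below are placeholders, not part of the
# original app; the guard keeps the demo from running when this file is merely
# imported.
if __name__ == "__main__":

    class EchoModel:
        # Placeholder model: simply echoes the prompt back
        def get_response(self, prompt: str) -> str:
            return f"You said: {prompt}"

    demo_model_name = "echo-model"
    init_chat_state(demo_model_name)  # sketch defined above
    st.title("Chat demo")
    chatbox(demo_model_name, model=EchoModel())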