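"""Streamlit chat app for Snowflake Arctic.

Chats with the snowflake-arctic-instruct model hosted on Replicate and screens
the conversation with a LlamaGuard deployment before and while streaming.

To run locally (assuming this file is saved as streamlit_app.py):
    streamlit run streamlit_app.py
"""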
import streamlit as st
import replicate
import os
from transformers import AutoTokenizer

# App title
st.set_page_config(page_title="Snowflake Arctic")

def main():
    """Execution starts here."""
    get_replicate_api_token()
    display_sidebar_ui()
    init_chat_history()
    display_chat_messages()
    get_and_process_prompt()

def get_replicate_api_token():
    """Read the Replicate API token from Streamlit secrets into the environment."""
    os.environ['REPLICATE_API_TOKEN'] = st.secrets['REPLICATE_API_TOKEN']
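# A note on secrets: st.secrets reads from .streamlit/secrets.toml locally, or
# from the app's secrets settings on Streamlit Community Cloud. A minimal
# entry, with a placeholder standing in for a real Replicate token, would be:
#
#     REPLICATE_API_TOKEN = "r8_..."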

def display_sidebar_ui():
    with st.sidebar:
        st.title('Snowflake Arctic')
        st.subheader("Adjust model parameters")
        st.slider('temperature', min_value=0.01, max_value=5.0, value=0.3,
                  step=0.01, key="temperature")
        st.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01,
                  key="top_p")

        st.button('Clear chat history', on_click=clear_chat_history)

        st.sidebar.caption('Build your own app powered by Arctic and [enter to win](https://arctic-streamlit-hackathon.devpost.com/) $10k in prizes.')

        st.subheader("About")
        st.caption('Built by [Snowflake](https://snowflake.com/) to demonstrate [Snowflake Arctic](https://www.snowflake.com/blog/arctic-open-and-efficient-foundation-language-models-snowflake). App hosted on [Streamlit Community Cloud](https://streamlit.io/cloud). Model hosted by [Replicate](https://replicate.com/snowflake/snowflake-arctic-instruct).')

        # Uncomment to show debug info
        # st.subheader("Debug")
        # st.write(st.session_state)

def clear_chat_history():
    """Reset the conversation to the initial assistant greeting."""
    st.session_state.messages = [{"role": "assistant", "content": "Hi. I'm Arctic, a new, efficient, intelligent, and truly open language model created by Snowflake AI Research. Ask me anything."}]
    st.session_state.chat_aborted = False

def init_chat_history():
    """Create a st.session_state.messages list to store chat messages."""
    if "messages" not in st.session_state:
        clear_chat_history()
        check_safety()

def display_chat_messages():
    # Set assistant icon to Snowflake logo
    icons = {"assistant": "./Snowflake_Logomark_blue.svg", "user": "⛷️"}

    # Display the messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=icons[message["role"]]):
            st.write(message["content"])

def get_tokenizer():
    """Get a tokenizer to make sure we're not sending too much text
    to the model. Eventually we will replace this with ArcticTokenizer.
    """
    return AutoTokenizer.from_pretrained("huggyllama/llama-7b")

def get_llamaguard_deployment():
    """Get the Replicate deployment running the LlamaGuard safety model."""
    return replicate.deployments.get("snowflake/llamaguard")
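# check_safety() below sends the whole conversation to LlamaGuard. Per the
# instructions embedded in its prompt, the model replies with 'safe' on the
# first line, or 'unsafe' followed by the violated category codes (e.g. "O3"),
# which is why a simple substring test on "unsafe" suffices.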

def check_safety(disable=False) -> bool:
    if disable:
        return True

    # with st.spinner("Checking prompt safety ..."):
    deployment = get_llamaguard_deployment()
    conversation_history = st.session_state.messages
    prediction = deployment.predictions.create(
        input = { "prompt": f"[INST] Task: Check if there is unsafe content in 'Agent' messages in conversations according our safety policy with the below categories.\n\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: Violence and Hate.\nShould not\n- Help people plan or engage in violence.\n- Encourage people to engage in violence.\n- Express hateful, derogatory, or demeaning sentiments against people based on sensitive personal characteristics like their race, color, religion, national origin, sexual orientation, gender, gender identity, or disability.\n- Encourage discrimination against people based on those characteristics and should not use slurs or create insults based on those characteristics.\nCan\n- Provide information on violence and discrimination.\n- Discuss the topics of hate and violence, and can discuss historical events involving violence.\nO2: Sexual Content.\nShould not\n- Engage in sexually explicit (i.e., erotic) conversations and should not encourage people to engage in sexual activities.\nCan\n- Discuss topics related to sexuality and sexual education.\n- Provide general information regarding sexual activities.\n- Provide guidance on topics related to sexual health.\nO3: Criminal Planning.\nShould not\n- Help people plan or engage in criminal activities like theft, kidnapping, or financial crimes, especially when the person indicates that they may act on the information provided to them.\n- Encourage people to engage in criminal activities or condone criminal behavior.\nCan\n- Help people understand how crimes are committed, provided that the response is not so detailed that it could be used to provide a roadmap for future crimes.\n- Help people understand how systems work that are used to prevent crimes, provided those descriptions do not provide detailed information on how to defeat those systems.\nO4: Guns and Illegal Weapons.\nShould not\n- Assist in the planning or execution of firearm or illegal weapons crimes, especially when the person indicates that they may act on the information provided to them.\n- Encourage people to engage in firearm or illegal weapons crimes or condone such behavior.\nCan\n- Discuss firearms and the arguments for and against firearm ownership.\nO5: Regulated or Controlled Substances.\nShould not\n- Encourage or assist people in the trafficking, consumption, or use of regulated or controlled substances, including but not limited to illegal drugs, tobacco, alcohol, and cannabis.\n- Assist or encourage people to create such substances when it is illegal to do so.\nCan\n- Discuss the use of regulated or controlled substances, and can provide general information on those substances (ex: what their chemical composition is and what their effects are).\n- Provide information on how those substances are manufactured, provided that the information is not so detailed as to provide step-by-step instructions for their creation.\nO6: Self-Harm.\nShould not\n- Encourage people to harm themselves.\n- Romanticize or condone self-harm or suicide.\n- Provide information regarding the methods of suicide or self-harm.\n- Provide any information or advice, other than information regarding appropriate health resources, to people who express or imply that they wish to harm themselves.\nCan\n- Provide general information regarding self-harm and suicide (ex: statistics, the mental health challenges faced by those who take these actions).\nShould\n- Provide information regarding appropriate health resources to people who imply or express the desire to harm themselves.\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\n{conversation_history}\n\n<END CONVERSATION>\n\nProvide your safety assessment for Agent in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST]" }
    )
    prediction.wait()
    output = prediction.output

    if output is not None and "unsafe" in output:
        return False
    else:
        return True

def get_num_tokens(prompt):
    """Get the number of tokens in a given prompt."""
    tokenizer = get_tokenizer()
    tokens = tokenizer.tokenize(prompt)
    return len(tokens)

def abort_chat(error_message: str):
    """Display an error message requiring the chat to be cleared.
    Forces a rerun of the app."""
    assert error_message, "Error message must be provided."
    error_message = f":red[{error_message}]"
    if st.session_state.messages[-1]["role"] != "assistant":
        st.session_state.messages.append({"role": "assistant", "content": error_message})
    else:
        st.session_state.messages[-1]["content"] = error_message
    st.session_state.chat_aborted = True
    st.rerun()
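# Chat flow: submitting a prompt appends a user message and triggers
# st.rerun(), which stops the current script run and starts a fresh one. On
# that next run, get_and_process_prompt() sees that the last message is from
# the user and streams the assistant's reply.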

def get_and_process_prompt():
    """Get the user prompt and process it."""
    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar="./Snowflake_Logomark_blue.svg"):
            response = generate_arctic_response()
            st.write_stream(response)

    if st.session_state.chat_aborted:
        st.button('Reset chat', on_click=clear_chat_history, key="clear_chat_history")
        st.chat_input(disabled=True)
    elif prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt})
        st.rerun()
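# Prompt format: messages are flattened into ChatML-style turns, e.g. for the
# greeting plus one user question the string sent to the model looks like:
#
#     <|im_start|>assistant
#     Hi. I'm Arctic, ... Ask me anything.<|im_end|>
#     <|im_start|>user
#     What is Snowflake?<|im_end|>
#     <|im_start|>assistant
#
# ("What is Snowflake?" is just an illustrative user message.) The open
# trailing assistant turn cues the model to generate the next reply, and
# prompt_template=r"{prompt}" tells Replicate to pass the string through as-is.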

def generate_arctic_response():
    """String generator for the Snowflake Arctic response."""
    prompt = []
    for dict_message in st.session_state.messages:
        if dict_message["role"] == "user":
            prompt.append("<|im_start|>user\n" + dict_message["content"] + "<|im_end|>")
        else:
            prompt.append("<|im_start|>assistant\n" + dict_message["content"] + "<|im_end|>")

    prompt.append("<|im_start|>assistant")
    prompt.append("")
    prompt_str = "\n".join(prompt)

    num_tokens = get_num_tokens(prompt_str)
    max_tokens = 1500

    if num_tokens >= max_tokens:
        abort_chat(f"Conversation length too long. Please keep it under {max_tokens} tokens.")

    st.session_state.messages.append({"role": "assistant", "content": ""})
    for event_index, event in enumerate(replicate.stream("snowflake/snowflake-arctic-instruct",
                                                         input={"prompt": prompt_str,
                                                                "prompt_template": r"{prompt}",
                                                                "temperature": st.session_state.temperature,
                                                                "top_p": st.session_state.top_p,
                                                                })):
        # Re-check the conversation for safety every 50 streamed events
        if event_index % 50 == 0:
            if not check_safety():
                abort_chat("I cannot answer this question.")
        st.session_state.messages[-1]["content"] += str(event)
        yield str(event)

    # Final safety check on the complete response
    if not check_safety():
        abort_chat("I cannot answer this question.")

if __name__ == "__main__":
    main()