Abbeite committed
Commit a9a4fe5
1 Parent(s): bc5dafd

Update app.py

Files changed (1): app.py +1 -1
app.py CHANGED
@@ -2,7 +2,7 @@ import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 # Streamlit's cache decorator to cache the model and tokenizer loading
-@st.cache(allow_output_mutation=True)
+
 def load_pipeline():
     model_name = "NousResearch/Llama-2-7b-chat-hf"  # Replace with your actual model name
     tokenizer = AutoTokenizer.from_pretrained(model_name)
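
This commit removes the @st.cache(allow_output_mutation=True) decorator (a deprecated Streamlit API), leaving load_pipeline uncached. For context, a minimal sketch of how the resulting function might look, assuming it finishes by building a text-generation pipeline; the model loading and pipeline(...) call below are assumptions, since the diff context ends at the tokenizer line:

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

def load_pipeline():
    model_name = "NousResearch/Llama-2-7b-chat-hf"  # Replace with your actual model name
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Assumed completion: the diff context stops at the tokenizer line, so the
    # exact model/pipeline setup in app.py may differ.
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

Without a cache decorator the model is reloaded on every Streamlit rerun; if caching is still wanted, @st.cache_resource is the current replacement for st.cache(allow_output_mutation=True).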