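The following Streamlit script loads a Hugging Face causal language model and exposes a simple prompt box for interactive text generation.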
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Streamlit's cache decorator keeps the model and tokenizer in memory so they
# are loaded only once per server process, not on every script rerun
@st.cache_resource
def load_pipeline():
    model_name = "NousResearch/Llama-2-7b-chat-hf"  # Replace with your actual model name
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # max_new_tokens bounds only the newly generated tokens; max_length would
    # also count the prompt tokens and can cut long inputs short
    chat_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
    return chat_pipeline

# Initialize the pipeline (served from the cache after the first run)
chat_pipeline = load_pipeline()

st.title("Interact with Your Model")

# User input
user_input = st.text_area("Enter your prompt:", "")

if st.button("Submit"):
    if user_input:
        try:
            # Generate text; the pipeline returns the prompt followed by the continuation
            generated_text = chat_pipeline(user_input)[0]['generated_text']
            st.write(generated_text)
        except Exception as e:
            st.error(f"Error generating text: {e}")
    else:
        st.warning("Please enter a prompt.")
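
By default, the text-generation pipeline echoes the prompt in its output. If only the newly generated continuation is wanted, the pipeline accepts return_full_text=False; a minimal sketch of the changed line:

# Return only the model's continuation, without echoing the prompt
generated_text = chat_pipeline(user_input, return_full_text=False)[0]['generated_text']

To try the app, save the script (assumed here to be app.py; the original does not name the file) and launch it with Streamlit's CLI:

streamlit run app.py

Keep in mind that Llama-2-7b has roughly 7 billion parameters, so loading it in full precision takes tens of gigabytes of memory; a smaller checkpoint can be a practical stand-in for local testing.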