import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


# Cache the pipeline so the model and tokenizer load only once,
# not on every Streamlit rerun triggered by user interaction.
@st.cache_resource
def load_pipeline():
    model_name = "NousResearch/Llama-2-7b-chat-hf"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    # max_length caps the combined prompt + generated tokens.
    chat_pipeline = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_length=300,
    )
    return chat_pipeline


chat_pipeline = load_pipeline()

st.title("Interact with Your Model")

user_input = st.text_area("Enter your prompt:", "")

if st.button("Submit"):
    if user_input:
        try:
            # The text-generation pipeline returns a list of dicts;
            # take the generated text of the first (and only) result.
            generated_text = chat_pipeline(user_input)[0]['generated_text']
            st.write(generated_text)
        except Exception as e:
            st.error(f"Error generating text: {e}")
    else:
        st.write("Please enter a prompt.")
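# To try the app locally, launch it with Streamlit's CLI. The filename app.py
# below is an assumed name for this script, not one given in the code above:
#   streamlit run app.py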