import logging

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Keep library log output quiet so it does not clutter the Streamlit page.
logging.basicConfig(level=logging.CRITICAL)


# Cache the model and tokenizer across reruns. st.experimental_singleton is
# deprecated in recent Streamlit releases; st.cache_resource is its
# replacement for unserializable objects such as models.
@st.cache_resource
def load_model():
    model_name = "Abbeite/chest_and_physical_limitations2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer


model, tokenizer = load_model()


def generate_text(prompt):
    # Wrap the prompt in the [INST] ... [/INST] tags expected by
    # Llama-style instruction-tuned models.
    formatted_prompt = f"[INST] {prompt} [/INST]"
    # The pipeline reuses the already-loaded model and tokenizer, so
    # constructing it per call is cheap; max_length caps the total token count.
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=300)
    result = pipe(formatted_prompt)
    return result[0]["generated_text"]
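
# For reference, a text-generation pipeline returns a list of dicts, and
# "generated_text" includes the echoed prompt. Illustrative shape only, not
# output captured from a real run:
#   [{"generated_text": "[INST] <prompt> [/INST] <model completion>"}]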


st.title("Interact with Your Model")

user_input = st.text_area("Enter your prompt:", "")

if st.button("Submit"):
    if user_input:
        generated_text = generate_text(user_input)
        st.write(generated_text)
    else:
        st.write("Please enter a prompt.")
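
# A minimal sketch of how to launch the app, assuming this file is saved as
# app.py (the filename is an assumption, not part of the original source):
#
#   streamlit run app.py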