Abbeite committed on
Commit 1664bb9
1 Parent(s): a2d8049

Create app.py

Files changed (1)
  1. app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
+ import streamlit as st
+ import logging
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ # Set the logger to display only CRITICAL messages
+ logging.basicConfig(level=logging.CRITICAL)
+
+
+ # Cache the model and tokenizer to avoid reloading them on every rerun
+ @st.experimental_singleton
+ def load_model():
+     model_name = "Abbeite/trail_wl"  # Replace with your actual model name
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name)
+     return model, tokenizer
+
+ model, tokenizer = load_model()
+
+ # Function to generate text with the model
+ def generate_text(prompt):
+     formatted_prompt = f"[INST] {prompt} [/INST]"  # Wrap the prompt in the model's instruction format
+     pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=300)
+     result = pipe(formatted_prompt)
+     return result[0]['generated_text']
+
+ st.title("Interact with Your Model")
+
+ # User input
+ user_input = st.text_area("Enter your prompt:", "")
+
+ if st.button("Submit"):
+     if user_input:
+         # Generate text based on the input
+         generated_text = generate_text(user_input)
+         st.write(generated_text)
+     else:
+         st.write("Please enter a prompt.")