Update app.py
app.py CHANGED
@@ -11,10 +11,9 @@ import torch
 # tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
 # return model, tokenizer
 
-
-def define_model():
+with st.spinner('Loading OPT-1.3b Model...'):
     generator = pipeline('text-generation', model="facebook/opt-1.3b", skip_special_tokens=True)
-
+st.success('Model loaded correctly!')
 
 #@st.cache(allow_output_mutation=True)
 #def opt_model(prompt, model, tokenizer, num_sequences = 1, max_length = 50):
@@ -25,7 +24,6 @@ def define_model():
 
 
 #model, tokenizer = define_model()
-generator = define_model()
 
 prompt= st.text_area('Your prompt here',
     '''Hello, I'm am conscious and''')
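
Taken together, the two hunks replace a define_model() wrapper with a module-level load. In the removed code, define_model() assigned generator locally and, at least in the lines the diff shows, never returned it, so generator = define_model() would have bound None, a likely cause of the Space's runtime error. Below is a minimal sketch of the loading section of app.py as of this commit, reconstructed only from the lines visible in the diff; the imports and whatever generation call follows them are assumptions, not part of the hunks above.

import streamlit as st
from transformers import pipeline

# Load the pipeline at import time; st.spinner shows a progress indicator
# in the UI while the 1.3B-parameter weights download and load.
with st.spinner('Loading OPT-1.3b Model...'):
    generator = pipeline('text-generation', model="facebook/opt-1.3b",
                         skip_special_tokens=True)  # kwarg kept exactly as in the commit
st.success('Model loaded correctly!')

prompt = st.text_area('Your prompt here',
                      '''Hello, I'm am conscious and''')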
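
One thing this commit does not address: with the load at module level and uncached, Streamlit re-executes app.py on every widget interaction, reloading the model each time. The commented-out #@st.cache(allow_output_mutation=True) line suggests caching had been tried before. A sketch of one way to do it, assuming a recent Streamlit where st.cache_resource is available; this is an assumption, not something this commit uses:

import streamlit as st
from transformers import pipeline

# Hypothetical variant, not part of this commit: cache the pipeline so it is
# built once per process instead of on every Streamlit rerun.
@st.cache_resource
def load_generator():
    # Unlike the removed define_model(), this returns the pipeline.
    return pipeline('text-generation', model="facebook/opt-1.3b")

with st.spinner('Loading OPT-1.3b Model...'):
    generator = load_generator()
st.success('Model loaded correctly!')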