Update app.py
app.py CHANGED
@@ -1,4 +1,31 @@
 import streamlit as st
 
-
-
+from transformers import AutoTokenizer
+import transformers
+import torch
+model = "PY007/TinyLlama-1.1B-Chat-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model)
+pipeline = transformers.pipeline(
+    "text-generation",
+    model=model,
+    torch_dtype=torch.float16,
+    device_map="auto",
+)
+
+prompt = "What is 6534+2343?"
+formatted_prompt = (
+    f"### Human: {prompt}### Assistant:"
+)
+
+
+sequences = pipeline(
+    formatted_prompt,
+    do_sample=True,
+    top_k=50,
+    top_p=0.7,
+    num_return_sequences=1,
+    repetition_penalty=1.1,
+    max_new_tokens=500,
+)
+for seq in sequences:
+    st.write(f"Result: {seq['generated_text']}")
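Note that the committed script loads the model at module level, and Streamlit re-executes app.py from the top on every widget interaction, so the 1.1B-parameter checkpoint would be reloaded on each rerun. A minimal sketch of the usual remedy, caching the pipeline with st.cache_resource (not part of this commit; the model id and generation call are the ones used above):

import streamlit as st
import transformers
import torch

# Sketch only: cache the heavy pipeline object so Streamlit reuses it
# across reruns instead of reloading the checkpoint every time.
@st.cache_resource
def load_pipeline(model_id: str = "PY007/TinyLlama-1.1B-Chat-v0.1"):
    return transformers.pipeline(
        "text-generation",
        model=model_id,
        torch_dtype=torch.float16,
        device_map="auto",
    )

pipe = load_pipeline()

To try the script locally, run it with `streamlit run app.py`. By default the text-generation pipeline returns the full text, so the output written by st.write includes the echoed "### Human: ... ### Assistant:" prompt followed by the model's completion.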