added chat functionality with the model from UI
app.py
CHANGED
@@ -2,7 +2,6 @@ import torch
 import gradio as gr
 from transformers import TextIteratorStreamer, AutoProcessor, LlavaForConditionalGeneration
 from PIL import Image
-import requests
 import threading
 import spaces
 import accelerate
@@ -23,12 +22,39 @@ model = LlavaForConditionalGeneration.from_pretrained(
 
 processor = AutoProcessor.from_pretrained(model_id)
 
+model.generation_config.eos_token_id = 128009
+
 @spaces.GPU(duration=120)
-def krypton(input_image):
+def krypton(input,
+            history):
+    """
+    Receives the input (a prompt, with an image if one was added).
+    The image is converted to PIL format and the prompt is formatted
+    for the model; both are passed into the processor and the model's
+    generation, then the output is decoded from the processor and
+    placed onto the UI.
+    """
+    if input["files"]:
+        if isinstance(input["files"][-1], dict):
+            image = input["files"][-1]["path"]
+        else:
+            image = input["files"][-1]
+    else:
+        # If no image was passed this turn, keep using the most recent
+        # image from history (image turns are stored as tuples).
+        for hist in history:
+            if isinstance(hist[0], tuple):
+                image = hist[0][0]
+    try:
+        if image is None:
+            raise gr.Error("Please upload an image for Krypton to work.")
+    except NameError:
+        # image was never assigned at all
+        raise gr.Error("Upload an image for Krypton to work.")
 
-    pil_image = Image.fromarray(input_image.astype('uint8'), 'RGB')
+    pil_image = Image.open(image).convert('RGB')  # uploads arrive as file paths
     # image = Image.open(requests.get(url, stream=True).raw)
-    prompt = ("<|start_header_id|>user<|end_header_id|>\n\n<image>\n<|eot_id|>"
+    prompt = (f"<|start_header_id|>user<|end_header_id|>\n\n<image>\n{input['text']}<|eot_id|>"
               "<|start_header_id|>assistant<|end_header_id|>\n\n")
     inputs = processor(prompt, pil_image, return_tensors='pt').to('cuda', torch.float16)
     outputs = model.generate(**inputs, max_new_tokens=200, do_sample=False)
@@ -36,12 +62,15 @@ def krypton(input_image):
     print(output_text)
     return output_text
 
+chatbot = gr.Chatbot(height=600, label="Krypt AI")
+chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter your question or upload an image.", show_label=False)
 with gr.Blocks(fill_height=True) as demo:
     gr.Markdown(DESCRIPTION)
-    gr.
+    gr.ChatInterface(
         fn=krypton,
-
-
+        chatbot=chatbot,
+        multimodal=True,
+        textbox=chat_input,
         fill_height=True
     )
 
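A note on the handler's inputs: with multimodal=True, Gradio calls fn with a dict of the form {"text": ..., "files": [...]} plus the chat history, where a turn that carried an image appears as a tuple in the user slot. That is the shape the file/history lookup in krypton assumes. Below is a minimal, self-contained sketch of just that fallback logic (the file names are placeholders; no model or GPU needed):

# Sketch of krypton's image lookup in isolation. The message/history
# shapes mirror what gr.ChatInterface(multimodal=True) passes to fn.
def last_image(message, history):
    image = None
    if message["files"]:
        entry = message["files"][-1]
        # Some Gradio versions wrap uploads in dicts with a "path" key.
        image = entry["path"] if isinstance(entry, dict) else entry
    else:
        # No upload this turn: fall back to the most recent image in
        # history, where image turns sit as tuples in the user slot.
        for user_turn, _reply in history:
            if isinstance(user_turn, tuple):
                image = user_turn[0]
    return image

history = [(("cat.png",), "A cat."), ("thanks", "Any time.")]
print(last_image({"text": "what breed?", "files": []}, history))           # cat.png
print(last_image({"text": "and this one?", "files": ["dog.png"]}, history)) # dog.png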
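On model.generation_config.eos_token_id = 128009: in the Llama 3 tokenizer this id is <|eot_id|>, the end-of-turn marker, so generation stops at the end of the assistant turn instead of running on. Hard-coding it works, but the id can also be derived from the tokenizer already loaded in app.py; a one-line sketch using the existing processor and model objects:

# Derive the end-of-turn id instead of hard-coding it; this resolves
# to 128009 for Llama-3-based checkpoints such as this one.
model.generation_config.eos_token_id = processor.tokenizer.convert_tokens_to_ids("<|eot_id|>")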
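Finally, TextIteratorStreamer and threading are imported but not used in the code shown: krypton still calls model.generate synchronously and returns a single string. If streaming replies are the eventual goal, the usual transformers pattern is to run generate in a background thread and yield partial text. A sketch of how the generation step inside krypton could be rewritten (not part of this commit):

# Streaming variant of the generation step inside krypton(), using the
# already-imported TextIteratorStreamer and threading modules.
streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=200, do_sample=False)
threading.Thread(target=model.generate, kwargs=generation_kwargs).start()

buffer = ""
for new_text in streamer:
    buffer += new_text
    yield buffer  # gr.ChatInterface re-renders the growing reply each step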