formatted the try and except blocks for krypton
app.py CHANGED
@@ -26,11 +26,10 @@ model.to('cuda')
 processor = AutoProcessor.from_pretrained(model_id)
 
 # Confirming and setting the eos_token_id (if necessary)
-# model.generation_config.eos_token_id = processor.tokenizer.eos_token_id
 model.generation_config.eos_token_id = 128009
 
 @spaces.GPU(duration=120)
-def bot_streaming(message, history):
+def krypton(message, history):
     print(message)
     if message["files"]:
         # message["files"][-1] is a Dict or just a string
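Note: 128009 is the id the Llama 3 tokenizer assigns to <|eot_id|>, the stop token handled in the streaming loop below. A less brittle variant would derive the id from the loaded processor instead of hard-coding it; a minimal sketch, assuming the processor and model set up above:

    # Assumption: resolve the id at runtime rather than hard-coding it; for
    # Llama 3 this returns 128009, the id of <|eot_id|>.
    eot_id = processor.tokenizer.convert_tokens_to_ids("<|eot_id|>")
    model.generation_config.eos_token_id = eot_id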
@@ -47,13 +46,12 @@ def bot_streaming(message, history):
     try:
         if image is None:
             # Handle the case where image is None
-            gr.Error("
+            gr.Error("Please upload an image so kypton can understand.")
     except NameError:
         # Handle the case where 'image' is not defined at all
-        gr.Error("
+        gr.Error("Upload an image so krypton can work.")
 
     prompt = f"<|start_header_id|>user<|end_header_id|>\n\n<image>\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
-    # print(f"prompt: {prompt}")
     image = Image.open(image)
     inputs = processor(prompt, image, return_tensors='pt').to(0, torch.float16)
 
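Note: gr.Error is an exception class in Gradio, so the bare gr.Error(...) calls in this hunk build the error message and immediately discard it; nothing reaches the UI unless the exception is raised. A sketch of the guard as it would actually fire, assuming the same image variable:

    # Raising gr.Error aborts the handler and shows the message in the UI.
    if image is None:
        raise gr.Error("Please upload an image so krypton can understand.")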
@@ -64,7 +62,6 @@ def bot_streaming(message, history):
     thread.start()
 
     text_prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
-    # print(f"text_prompt: {text_prompt}")
 
     buffer = ""
     time.sleep(0.5)
@@ -73,12 +70,8 @@ def bot_streaming(message, history):
         if "<|eot_id|>" in new_text:
             new_text = new_text.split("<|eot_id|>")[0]
         buffer += new_text
-
-        # generated_text_without_prompt = buffer[len(text_prompt):]
         generated_text_without_prompt = buffer
-        # print(generated_text_without_prompt)
         time.sleep(0.06)
-        # print(f"new_text: {generated_text_without_prompt}")
         yield generated_text_without_prompt
 
 
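Note: the thread.start() and buffer/yield lines in these hunks follow the usual transformers TextIteratorStreamer pattern; the streamer construction itself sits outside the diff. A minimal sketch of the assumed surrounding code (stream_reply and max_new_tokens=512 are illustrative, not part of the commit):

    from threading import Thread
    from transformers import TextIteratorStreamer

    def stream_reply(inputs):
        # Run generation on a background thread; the streamer yields text chunks.
        streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True,
                                        skip_special_tokens=False)
        thread = Thread(target=model.generate,
                        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=512))
        thread.start()
        buffer = ""
        for new_text in streamer:
            # Trim the stop token exactly as the hunk above does.
            if "<|eot_id|>" in new_text:
                new_text = new_text.split("<|eot_id|>")[0]
            buffer += new_text
            yield buffer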
@@ -88,7 +81,7 @@ chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeh
 with gr.Blocks(fill_height=True) as demo:
     gr.Markdown(DESCRIPTION)
     gr.ChatInterface(
-        fn=bot_streaming,
+        fn=kypton,
         chatbot=chatbot,
         fill_height=True,
         multimodal=True,
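Note: the new side defines def krypton(message, history) but wires fn=kypton into gr.ChatInterface, a name that does not exist, so the Space would raise NameError at startup. The presumably intended wiring:

    gr.ChatInterface(
        fn=krypton,  # must match the decorated function name above
        chatbot=chatbot,
        fill_height=True,
        multimodal=True,
    )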