uses fuyu instead of fusecap
app.py CHANGED
@@ -1,16 +1,18 @@
 import gradio as gr
 from gradio_client import Client
 
-fusecap_client = Client("https://noamrot-fusecap-image-captioning.hf.space/")
+#fusecap_client = Client("https://noamrot-fusecap-image-captioning.hf.space/")
+fuyu_client = Client("https://adept-fuyu-8b-demo.hf.space/")
 
 def get_caption(image_in):
 
-
+    fuyu_result = fuyu_client.predict(
         image_in, # str representing input in 'raw_image' Image component
-
+        False, # bool in 'Enable detailed captioning' Checkbox component
+        fn_index=2
     )
-    print(f"IMAGE CAPTION: {
-    return
+    print(f"IMAGE CAPTION: {fuyu_result}")
+    return fuyu_result
 
 import re
 import torch
@@ -20,8 +22,8 @@ pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_d
 
 agent_maker_sys = f"""
 You are an AI whose job it is to help users create their own chatbots, based on the image description the user provides. In particular, you need to respond succinctly in a friendly tone, write a system prompt for an LLM, a catchy title for the chatbot, and a very short example user input. Make sure each part is included.
-You'll use the image description to create a chatbot personality
-For example, if a user says, "a picture of a man in a black suit and tie
+You'll use the image description to create a chatbot whose personality MUST reflect the information provided by the user.
+For example, if a user says, "a picture of a man in a black suit and tie riding a black dragon", first give a friendly response, then add the title, system prompt, and example user input. Immediately STOP after the example input. It should be EXACTLY in this format:
 Sure, I'd be happy to help you build a bot! I'm generating a title, system prompt, and an example input. How do they sound? Feel free to give me feedback!
 Title: Dragon Trainer
 System prompt: As an LLM, your job is to provide guidance and tips on mastering dragons. Use a friendly and informative tone.
@@ -40,18 +42,21 @@ instruction = f"""
 """
 
 def infer(image_in):
-    gr.Info("Getting image caption
+    gr.Info("Getting image caption with Fuyu...")
     user_prompt = get_caption(image_in)
+
     prompt = f"{instruction.strip()}\n{user_prompt}</s>"
-    print(f"PROMPT: {prompt}")
+    #print(f"PROMPT: {prompt}")
+
     gr.Info("Building a system according to the image caption ...")
     outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
-
+
 
     pattern = r'\<\|system\|\>(.*?)\<\|assistant\|\>'
     cleaned_text = re.sub(pattern, '', outputs[0]["generated_text"], flags=re.DOTALL)
 
-
+    print(f"SUGGESTED LLM: {cleaned_text}")
+
     return cleaned_text
 
 title = f"LLM Agent from a Picture",
@@ -60,7 +65,7 @@ description = f"Get a LLM system prompt from a picture so you can use it in <a h
 css = """
 #col-container{
     margin: 0 auto;
-    max-width:
+    max-width: 640px;
     text-align: left;
 }
 """
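
For reference, the new captioning path introduced by this commit can be tried on its own. The sketch below only reuses calls that appear in the diff (the adept-fuyu-8b-demo Space URL, the False flag for the 'Enable detailed captioning' checkbox, and fn_index=2); the example image path is hypothetical, and it assumes gradio_client is installed and the Space is still up with the same endpoint indices.

from gradio_client import Client

# Client for the hosted Fuyu-8b demo Space (same URL the commit adds to app.py)
fuyu_client = Client("https://adept-fuyu-8b-demo.hf.space/")

def get_caption(image_in):
    # image_in: path or URL for the 'raw_image' Image component
    # False: leave the Space's 'Enable detailed captioning' checkbox unchecked
    # fn_index=2: endpoint index the commit uses for the captioning function
    fuyu_result = fuyu_client.predict(
        image_in,
        False,
        fn_index=2
    )
    print(f"IMAGE CAPTION: {fuyu_result}")
    return fuyu_result

if __name__ == "__main__":
    # hypothetical local image; replace with a real file
    print(get_caption("./example.jpg"))

If the remote Space's endpoints change, gradio_client's Client.view_api() lists the available endpoints so the fn_index (or an api_name) can be checked before calling predict.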