Create test.py
Browse files
test.py
ADDED
@@ -0,0 +1,182 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import gradio as gr
|
3 |
+
import ast
|
4 |
+
import requests
|
5 |
+
|
6 |
+
from theme_dropdown import create_theme_dropdown # noqa: F401
|
7 |
+
|
8 |
+
# Theme dropdown for the demo; `dropdown`/`js` appear unused in this file —
# presumably consumed by the theme template elsewhere. TODO confirm.
dropdown, js = create_theme_dropdown()

# Display name -> Hugging Face model repo offered in the "T-to-I" tab.
models = [
    {"name": "Stable Diffusion 2", "url": "stabilityai/stable-diffusion-2-1"},
    {"name": "stability AI", "url": "stabilityai/stable-diffusion-2-1-base"},
    {"name": "Compressed-S-D", "url": "nota-ai/bk-sdm-small"},
    {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"},
    {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"},
    {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"},
    {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"},
    {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"},
]

# Prompt-expansion Space used by text_it().
text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")

# Tracks the dropdown selection; updated in set_model().
current_model = models[0]

# Load every model interface once up front; send_it() indexes this list by
# the dropdown's integer value (type="index").
models2 = [
    gr.Interface.load(f"models/{entry['url']}", live=True, preprocess=True)
    for entry in models
]
|
30 |
+
|
31 |
+
def text_it(inputs, text_gen=text_gen):
    """Expand a short prompt idea into a fuller prompt.

    ``text_gen`` is bound to the module-level prompt-extend interface at
    definition time; pass a different callable to override it.
    """
    expanded_prompt = text_gen(inputs)
    return expanded_prompt
|
33 |
+
|
34 |
+
def flip_text(x):
    """Return the input reversed (demo helper; its wiring is commented out)."""
    reversed_value = x[::-1]
    return reversed_value
|
36 |
+
|
37 |
+
def send_it(inputs, model_choice):
    """Generate an image by running ``inputs`` through the model at index
    ``model_choice`` in the preloaded ``models2`` list."""
    return models2[model_choice](inputs)
|
40 |
+
|
41 |
+
|
42 |
+
def flip_image(x):
    """Mirror an image left-to-right (demo helper; its wiring is commented out)."""
    mirrored = np.fliplr(x)
    return mirrored
|
44 |
+
|
45 |
+
|
46 |
+
def set_model(current_model_index):
    """Record the dropdown selection in the module-level ``current_model``
    and return a component update carrying the chosen model's name."""
    global current_model
    current_model = models[current_model_index]
    selected_name = current_model['name']
    return gr.update(value=f"{selected_name}")
|
50 |
+
|
51 |
+
|
52 |
+
# Root UI: a tabbed Gradio app. `pan` is queued and launched at file bottom.
# (Indentation reconstructed from a mangled diff view.)
with gr.Blocks(theme='pikto/theme@>=0.0.1,<0.0.3') as pan:
    gr.Markdown("AI CONTENT TOOLS.")

    # --- Tab 1: text-to-image with a model picker --------------------------
    with gr.Tab("T-to-I"):

        ##model = ("stabilityai/stable-diffusion-2-1")
        # type="index" makes the dropdown emit the integer position of the
        # chosen entry, which send_it()/set_model() use to index the lists.
        model_name1 = gr.Dropdown(
            label="Choose Model",
            choices=[m["name"] for m in models],
            type="index",
            value=current_model["name"],
            interactive=True,
        )
        input_text = gr.Textbox(label="Prompt idea",)

        ## run = gr.Button("Generate Images")
        with gr.Row():
            see_prompts = gr.Button("Generate Prompts")
            run = gr.Button("Generate Images", variant="primary")

        # Two prompt/image pairs so a single idea yields two generations.
        with gr.Row():
            magic1 = gr.Textbox(label="Generated Prompt", lines=2)
            output1 = gr.Image(label="")

        with gr.Row():
            magic2 = gr.Textbox(label="Generated Prompt", lines=2)
            output2 = gr.Image(label="")

        # "Generate Images" feeds each generated prompt plus the dropdown
        # index to send_it; "Generate Prompts" expands the raw idea into
        # both prompt boxes via text_it.
        run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
        run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
        see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
        see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])

        # NOTE(review): set_model returns a single gr.update but is wired to
        # two outputs here — confirm whether both images should be updated.
        model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2,])

    # --- Tab 2: streams a PlaygroundAI image grid via another Space's API --
    with gr.Tab("Flip Image"):
        #Using Gradio Demos as API - This is Hot!
        API_URL_INITIAL = "https://ysharma-playground-ai-exploration.hf.space/run/initial_dataframe"
        API_URL_NEXT10 = "https://ysharma-playground-ai-exploration.hf.space/run/next_10_rows"

        #define inference function
        #First: Get initial images for the grid display
|
96 |
+
def get_initial_images():
    """POST an empty query to the remote Space API and return its paging state.

    The returned value is the first element of the response's "data" list:
    a dict that both seeds the grid render (via process_response) and is
    round-tripped through the UI for subsequent paging calls.
    """
    raw = requests.post(API_URL_INITIAL, json={"data": []}).json()
    return raw['data'][0]
|
103 |
+
|
104 |
+
#Second: Process the response dictionary to get images as hyperlinked image tags
def process_response(response_dict):
    """Extract each row's first cell, dropping its trailing character."""
    tags = []
    for row in response_dict["data"]:
        tags.append(row[0][:-1])
    return tags
|
107 |
+
|
108 |
+
# Bootstrap the grid once at build time: fetch page one and render it as a
# 3x3 CSS-grid <div> of hyperlinked image tags.
response_dict = get_initial_images()
initial = process_response(response_dict)
# [:-1] drops the final entry — presumably a non-image row; TODO confirm.
initial_imgs = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);">\n' + "\n".join(initial[:-1])

#Third: Load more images for the grid
|
113 |
+
def get_next10_images(response_dict, row_count):
    """Fetch the next batch of grid rows from the remote Space API.

    Parameters:
        response_dict: paging state from the previous call. Arrives as the
            dict's string repr when round-tripped through the hidden
            Textbox component, so it is parsed back into a dict first.
        row_count: current row offset; coerced to int and advanced by 10.

    Returns:
        (new_state_dict, advanced_row_count, html_grid_of_image_tags)
    """
    row_count = int(row_count)
    # Idiomatic type check (was `isinstance(...) == False`). literal_eval
    # only evaluates Python literals, so unlike eval() it cannot run code.
    if not isinstance(response_dict, dict):
        response_dict = ast.literal_eval(response_dict)
    response = requests.post(API_URL_NEXT10, json={
        "data": [response_dict, row_count]  # len(initial)-1
    }).json()
    row_count += 10
    response_dict = response['data'][0]
    # Each row's first cell is an image tag with one trailing char to strip.
    next_set = [resp[0][:-1] for resp in response_dict["data"]]
    next_set_images = '<div style="display: grid; grid-template-columns: repeat(3, 1fr); grid-template-rows: repeat(3, 1fr); grid-gap: 0; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2); ">\n' + "\n".join(next_set[:-1])
    return response_dict, row_count, next_set_images
|
129 |
+
|
130 |
+
#get_next10_images(response_dict=response_dict, row_count=9)
#position: fixed; top: 0; left: 0; width: 100%; background-color: #fff; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);

#Defining the Blocks layout
# NOTE(review): this span sat inside `pan`'s "Flip Image" tab in the original
# file; indentation is reconstructed from a mangled diff view. A nested
# gr.Blocks (`demo`) inside the outer `pan` Blocks is unusual — only `pan` is
# launched below; confirm `demo` renders as intended inside the tab.
with gr.Blocks(css = """#img_search img {width: 100%; height: 100%; object-fit: cover;}""") as demo:
    # Hidden anchor used as a scroll target for the top of the page.
    gr.HTML(value="top of page", elem_id="top",visible=False)
    gr.HTML("""<div style="text-align: center; max-width: 700px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
                Using Gradio Demos as API - 2 </h1><br></div>
                <div><h4 style="font-weight: 500; margin-bottom: 7px; margin-top: 5px;">
                Stream <a href="https://github.com/playgroundai/liked_images" target="_blank">PlaygroundAI Images</a> ina beautiful grid</h4><br>
              </div>""")
    with gr.Accordion(label="Details about the working:", open=False, elem_id='accordion'):
        gr.HTML("""
        <p style="margin-bottom: 10px; font-size: 90%"><br>
        ▶️Do you see the "view api" link located in the footer of this application?
        By clicking on this link, a page will open which provides documentation on the REST API that developers can use to query the Interface function / Block events.<br>
        ▶️In this demo, I am making such an API request to the <a href="https://huggingface.co/spaces/ysharma/Playground_AI_Exploration" target="_blank">Playground_AI_Exploration</a> Space.<br>
        ▶️I am exposing an API endpoint of this Gradio app as well. This can easily be done by one line of code, just set the api_name parameter of the event listener.
        </p></div>""")

    with gr.Column(): #(elem_id = "col-container"):
        # .style(full_width=False) is the legacy (pre-4.x) Gradio styling
        # API — TODO confirm the pinned gradio version still supports it.
        b1 = gr.Button("Load More Images").style(full_width=False)
        # Hidden state round-trip: the API state dict is stored as text here
        # and parsed back by get_next10_images on each click.
        df = gr.Textbox(visible=False,elem_id='dataframe', value=response_dict)
        # Row offset for the next fetch; presumably 19 covers the rows already
        # shown by the initial render — verify against the remote API.
        row_count = gr.Number(visible=False, value=19 )
        img_search = gr.HTML(label = 'Images from PlaygroundAI dataset', elem_id="img_search",
                             value=initial_imgs ) #initial[:-1] )

    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/Stream_PlaygroundAI_Images?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></center>
    </p></div>''')
    # api_name exposes this click handler as a named REST endpoint.
    b1.click(get_next10_images, [df, row_count], [df, row_count, img_search], api_name = "load_playgroundai_images" )


# Third tab of `pan` (same reconstruction caveat as above): placeholder
# components whose event wiring is commented out below.
with gr.Tab("Diffuser"):
    with gr.Row():
        text_input = gr.Textbox() ## Diffuser
        image_output = gr.Image()
        image_button = gr.Button("Flip")


# text_button.click(flip_text, inputs=text_input, outputs=text_output)
# image_button.click(flip_image, inputs=image_input, outputs=image_output)
# Enable the request queue, then serve the app; concurrency_count is the
# legacy (pre-4.x) queue sizing parameter — confirm against gradio version.
pan.queue(concurrency_count=200)
pan.launch(inline=True, show_api=True, max_threads=400)
|