Update app.py

app.py CHANGED
@@ -1,32 +1,41 @@
-import
-import time
-import json
-import base64
-from datetime import datetime
-import numpy as np
-import torch
+import spaces
 import gradio as gr
 from gradio_imageslider import ImageSlider
-
+import torch
+
+torch.jit.script = lambda f: f
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionXLControlNetImg2ImgPipeline,
+    DDIMScheduler,
+)
 from controlnet_aux import AnylineDetector
 from compel import Compel, ReturnedEmbeddingsType
 from PIL import Image
-import
+import os
+import time
+import numpy as np
 
-# Configuration
 IS_SPACES_ZERO = os.environ.get("SPACES_ZERO_GPU", "0") == "1"
 IS_SPACE = os.environ.get("SPACE_ID", None) is not None
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
 dtype = torch.float16
+
 LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"
 
 print(f"device: {device}")
 print(f"dtype: {dtype}")
 print(f"low memory: {LOW_MEMORY}")
 
-
+
 model = "stabilityai/stable-diffusion-xl-base-1.0"
+# model = "stabilityai/sdxl-turbo"
+# vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)
 scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
+# controlnet = ControlNetModel.from_pretrained(
+#     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
+# )
 controlnet = ControlNetModel.from_pretrained(
     "TheMistoAI/MistoLine",
     torch_dtype=torch.float16,
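Reviewer note: the rewritten header imports the diffusers classes explicitly and monkey-patches torch.jit.script to a no-op, presumably to sidestep TorchScript compilation in this environment. The pipeline construction itself sits in unchanged lines outside this hunk, so the sketch below is only a plausible wiring of these imports; every argument beyond what the hunk shows is an assumption, not something taken from app.py.

# Sketch only: plausible assembly of the imported classes; argument choices beyond
# those visible in the hunk above are guesses and may differ from the real app.py.
import torch
from diffusers import (
    ControlNetModel,
    StableDiffusionXLControlNetImg2ImgPipeline,
    DDIMScheduler,
)

model = "stabilityai/stable-diffusion-xl-base-1.0"
scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
controlnet = ControlNetModel.from_pretrained(
    "TheMistoAI/MistoLine", torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
    model,
    controlnet=controlnet,
    scheduler=scheduler,
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")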
@@ -54,21 +63,6 @@ anyline = AnylineDetector.from_pretrained(
     "TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline"
 ).to(device)
 
-# Global variables for metadata and likes cache
-image_metadata = pd.DataFrame(columns=['Filename', 'Prompt', 'Likes', 'Dislikes', 'Hearts', 'Created'])
-LIKES_CACHE_FILE = "likes_cache.json"
-
-def load_likes_cache():
-    if os.path.exists(LIKES_CACHE_FILE):
-        with open(LIKES_CACHE_FILE, 'r') as f:
-            return json.load(f)
-    return {}
-
-def save_likes_cache(cache):
-    with open(LIKES_CACHE_FILE, 'w') as f:
-        json.dump(cache, f)
-
-likes_cache = load_likes_cache()
 
 def pad_image(image):
     w, h = image.size
@@ -76,81 +70,19 @@ def pad_image(image):
         return image
     elif w > h:
         new_image = Image.new(image.mode, (w, w), (0, 0, 0))
-
+        pad_w = 0
+        pad_h = (w - h) // 2
+        new_image.paste(image, (0, pad_h))
         return new_image
     else:
         new_image = Image.new(image.mode, (h, h), (0, 0, 0))
-
+        pad_w = (h - w) // 2
+        pad_h = 0
+        new_image.paste(image, (pad_w, 0))
         return new_image
 
-def create_download_link(filename):
-    with open(filename, "rb") as file:
-        encoded_string = base64.b64encode(file.read()).decode('utf-8')
-    download_link = f'<a href="data:image/png;base64,{encoded_string}" download="{filename}">Download Image</a>'
-    return download_link
-
-def save_image(image: Image.Image, prompt: str) -> str:
-    global image_metadata, likes_cache
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    safe_prompt = ''.join(e for e in prompt if e.isalnum() or e.isspace())[:50]
-    filename = f"{timestamp}_{safe_prompt}.png"
-    image.save(filename)
-    new_row = pd.DataFrame({
-        'Filename': [filename],
-        'Prompt': [prompt],
-        'Likes': [0],
-        'Dislikes': [0],
-        'Hearts': [0],
-        'Created': [datetime.now()]
-    })
-    image_metadata = pd.concat([image_metadata, new_row], ignore_index=True)
-    likes_cache[filename] = {'likes': 0, 'dislikes': 0, 'hearts': 0}
-    save_likes_cache(likes_cache)
-    return filename
-
-def get_image_gallery():
-    global image_metadata
-    image_files = image_metadata['Filename'].tolist()
-    return [(file, get_image_caption(file)) for file in image_files if os.path.exists(file)]
-
-def get_image_caption(filename):
-    global likes_cache, image_metadata
-    if filename in likes_cache:
-        likes = likes_cache[filename]['likes']
-        dislikes = likes_cache[filename]['dislikes']
-        hearts = likes_cache[filename]['hearts']
-        prompt = image_metadata[image_metadata['Filename'] == filename]['Prompt'].values[0]
-        return f"{filename}\nPrompt: {prompt}\n👍 {likes} 👎 {dislikes} ❤️ {hearts}"
-    return filename
-
-def delete_all_images():
-    global image_metadata, likes_cache
-    for file in image_metadata['Filename']:
-        if os.path.exists(file):
-            os.remove(file)
-    image_metadata = pd.DataFrame(columns=['Filename', 'Prompt', 'Likes', 'Dislikes', 'Hearts', 'Created'])
-    likes_cache = {}
-    save_likes_cache(likes_cache)
-    return get_image_gallery(), image_metadata.values.tolist()
-
-def delete_image(filename):
-    global image_metadata, likes_cache
-    if filename and os.path.exists(filename):
-        os.remove(filename)
-        image_metadata = image_metadata[image_metadata['Filename'] != filename]
-        if filename in likes_cache:
-            del likes_cache[filename]
-            save_likes_cache(likes_cache)
-    return get_image_gallery(), image_metadata.values.tolist()
 
-
-    global likes_cache
-    if filename in likes_cache:
-        likes_cache[filename][vote_type.lower()] += 1
-        save_likes_cache(likes_cache)
-    return get_image_gallery(), image_metadata.values.tolist()
-
-@gr.on(queue_pred_done=True)
+@spaces.GPU
 def predict(
     input_image,
     prompt,
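Reviewer note: the functional changes in this hunk are the new paste calls, which centre the source image on the square canvas, and the swap of the @gr.on(queue_pred_done=True) decorator for ZeroGPU's @spaces.GPU. Below is a small self-contained check of the patched padding logic; the first branch is assumed to be `if w == h:`, since that line sits just above the hunk and is not shown.

from PIL import Image

def pad_image(image):
    # Mirrors the patched logic: pad the shorter side so the result is a centred square.
    w, h = image.size
    if w == h:  # assumed branch; it falls outside the hunk shown above
        return image
    elif w > h:
        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
        new_image.paste(image, (0, (w - h) // 2))
        return new_image
    else:
        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
        new_image.paste(image, ((h - w) // 2, 0))
        return new_image

landscape = Image.new("RGB", (640, 480), "white")
print(pad_image(landscape).size)  # (640, 640), with the content offset 80 px from the top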
@@ -197,136 +129,106 @@ def predict(
         eta=1.0,
     )
     print(f"Time taken: {time.time() - last_time}")
-
-
-    download_link = create_download_link(filename)
-    return (padded_image, generated_image), padded_image, anyline_image, download_link, get_image_gallery(), image_metadata.values.tolist()
+    return (padded_image, images.images[0]), padded_image, anyline_image
+
 
 css = """
-#intro
-    max-width:
-    text-align: center;
-    margin: 0 auto;
+#intro{
+    # max-width: 32rem;
+    # text-align: center;
+    # margin: 0 auto;
 }
-.gradio-container {max-width: 1200px !important}
-footer {visibility: hidden}
 """
 
-with gr.Blocks(css=css
+with gr.Blocks(css=css) as demo:
     gr.Markdown(
         """
-        #
-
-
-
-
-
-        - SDXL Controlnet: [TheMistoAI/MistoLine](https://huggingface.co/TheMistoAI/MistoLine)
-        - [Anyline with Controlnet Aux](https://github.com/huggingface/controlnet_aux)
-        - For upscaling, see [Enhance This Demo](https://huggingface.co/spaces/radames/Enhance-This-HiDiffusion-SDXL)
+        # MistoLine ControlNet demo
+
+        You can upload an initial image and prompt to generate an enhanced version.
+        SDXL Controlnet [TheMistoAI/MistoLine](https://huggingface.co/TheMistoAI/MistoLine)
+        [Anyline with Controlnet Aux ](https://github.com/huggingface/controlnet_aux)
+        For upscaling see [Enhance This Demo](https://huggingface.co/spaces/radames/Enhance-This-HiDiffusion-SDXL)
         """,
         elem_id="intro",
     )
-
-
-
-
-
-            prompt
-
-
+    with gr.Row():
+        with gr.Column(scale=1):
+            image_input = gr.Image(type="pil", label="Input Image")
+            prompt = gr.Textbox(
+                label="Prompt",
+                info="The prompt is very important to get the desired results. Please try to describe the image as best as you can. Accepts Compel Syntax",
+            )
+            negative_prompt = gr.Textbox(
+                label="Negative Prompt",
+                value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
+            )
+            seed = gr.Slider(
+                minimum=0,
+                maximum=2**64 - 1,
+                value=1415926535897932,
+                step=1,
+                label="Seed",
+                randomize=True,
+            )
+            with gr.Accordion(label="Advanced", open=False):
+                guidance_scale = gr.Slider(
+                    minimum=0,
+                    maximum=50,
+                    value=8.5,
+                    step=0.001,
+                    label="Guidance Scale",
+                )
+                controlnet_conditioning_scale = gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.001,
+                    value=0.5,
+                    label="ControlNet Conditioning Scale",
+                )
+                strength = gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.001,
+                    value=1,
+                    label="Strength",
                 )
-
-
-
+                controlnet_start = gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.001,
+                    value=0.0,
+                    label="ControlNet Start",
+                )
+                controlnet_end = gr.Slider(
+                    minimum=0.0,
+                    maximum=1.0,
+                    step=0.001,
+                    value=1.0,
+                    label="ControlNet End",
+                )
+                guassian_sigma = gr.Slider(
+                    minimum=0.01,
+                    maximum=10.0,
+                    step=0.1,
+                    value=2.0,
+                    label="(Anyline) Guassian Sigma",
                 )
-
+                intensity_threshold = gr.Slider(
                    minimum=0,
-                    maximum=
-                    value=1415926535897932,
+                    maximum=255,
                     step=1,
-
-
+                    value=3,
+                    label="(Anyline) Intensity Threshold",
                 )
-            with gr.Accordion(label="Advanced", open=False):
-                guidance_scale = gr.Slider(
-                    minimum=0,
-                    maximum=50,
-                    value=8.5,
-                    step=0.001,
-                    label="Guidance Scale",
-                )
-                controlnet_conditioning_scale = gr.Slider(
-                    minimum=0,
-                    maximum=1,
-                    step=0.001,
-                    value=0.5,
-                    label="ControlNet Conditioning Scale",
-                )
-                strength = gr.Slider(
-                    minimum=0,
-                    maximum=1,
-                    step=0.001,
-                    value=1,
-                    label="Strength",
-                )
-                controlnet_start = gr.Slider(
-                    minimum=0,
-                    maximum=1,
-                    step=0.001,
-                    value=0.0,
-                    label="ControlNet Start",
-                )
-                controlnet_end = gr.Slider(
-                    minimum=0.0,
-                    maximum=1.0,
-                    step=0.001,
-                    value=1.0,
-                    label="ControlNet End",
-                )
-                guassian_sigma = gr.Slider(
-                    minimum=0.01,
-                    maximum=10.0,
-                    step=0.1,
-                    value=2.0,
-                    label="(Anyline) Guassian Sigma",
-                )
-                intensity_threshold = gr.Slider(
-                    minimum=0,
-                    maximum=255,
-                    step=1,
-                    value=3,
-                    label="(Anyline) Intensity Threshold",
-                )
-
-            btn = gr.Button("Generate")
-        with gr.Column(scale=2):
-            with gr.Group():
-                image_slider = ImageSlider(position=0.5)
-                with gr.Row():
-                    padded_image = gr.Image(type="pil", label="Padded Image")
-                    anyline_image = gr.Image(type="pil", label="Anyline Image")
-            download_link = gr.HTML(label="Download Generated Image")
-
-    with gr.Tab("Gallery and Voting"):
-        image_gallery = gr.Gallery(label="Generated Images", show_label=True, columns=4, height="auto")
-
-        with gr.Row():
-            like_button = gr.Button("👍 Like")
-            dislike_button = gr.Button("👎 Dislike")
-            heart_button = gr.Button("❤️ Heart")
-            delete_image_button = gr.Button("🗑️ Delete Selected Image")
-
-        selected_image = gr.State(None)
-
-    with gr.Tab("Metadata and Management"):
-        metadata_df = gr.Dataframe(
-            label="Image Metadata",
-            headers=["Filename", "Prompt", "Likes", "Dislikes", "Hearts", "Created"],
-            interactive=False
-        )
-        delete_all_button = gr.Button("🗑️ Delete All Images")
 
+            btn = gr.Button()
+        with gr.Column(scale=2):
+            with gr.Group():
+                image_slider = ImageSlider(position=0.5)
+                with gr.Row():
+                    padded_image = gr.Image(type="pil", label="Padded Image")
+                    anyline_image = gr.Image(type="pil", label="Anyline Image")
     inputs = [
         image_input,
         prompt,
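Reviewer note: the rebuilt prompt box advertises Compel syntax and the header keeps the Compel and ReturnedEmbeddingsType imports, but the Compel setup itself is not visible in this diff. The usual SDXL arrangement looks roughly like the sketch below; it reuses the hypothetical `pipe` from the sketch after the first hunk and is an assumption about, not a quote from, the hidden part of app.py.

from compel import Compel, ReturnedEmbeddingsType

# Assumed SDXL dual-encoder setup; `pipe` is the hypothetical pipeline from the earlier
# sketch, not an object defined anywhere in this diff.
compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],
)

# "++" is Compel emphasis syntax: it upweights the token it follows.
conditioning, pooled = compel("a detailed++ photo of a medieval castle")

# These embeddings would then be passed to the pipeline call as
# prompt_embeds=conditioning and pooled_prompt_embeds=pooled.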
@@ -340,20 +242,10 @@ This demo showcases the capabilities of [TheMistoAI/MistoLine](https://huggingfa
         guassian_sigma,
         intensity_threshold,
     ]
-    outputs = [image_slider, padded_image, anyline_image
-
-
-
-    image_gallery.select(fn=lambda evt: evt, inputs=[], outputs=[selected_image])
-
-    like_button.click(fn=lambda x: vote(x, 'likes'), inputs=[selected_image], outputs=[image_gallery, metadata_df])
-    dislike_button.click(fn=lambda x: vote(x, 'dislikes'), inputs=[selected_image], outputs=[image_gallery, metadata_df])
-    heart_button.click(fn=lambda x: vote(x, 'hearts'), inputs=[selected_image], outputs=[image_gallery, metadata_df])
-    delete_image_button.click(fn=delete_image, inputs=[selected_image], outputs=[image_gallery, metadata_df])
-    delete_all_button.click(fn=delete_all_images, inputs=[], outputs=[image_gallery, metadata_df])
-
-    demo.load(fn=lambda: (get_image_gallery(), image_metadata.values.tolist()), outputs=[image_gallery, metadata_df])
-
+    outputs = [image_slider, padded_image, anyline_image]
+    btn.click(lambda x: None, inputs=None, outputs=image_slider).then(
+        fn=predict, inputs=inputs, outputs=outputs
+    )
     gr.Examples(
         fn=predict,
         inputs=inputs,
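Reviewer note: all of the gallery, voting and deletion wiring is dropped; the only event left is the generate button, chained so that the first callback clears the ImageSlider and the .then() step runs the heavy predict afterwards. A minimal standalone version of that pattern, using generic components rather than the app's own:

import gradio as gr

def slow_job(text):
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    btn = gr.Button("Run")

    # First clear the output, then run the long job; mirrors btn.click(...).then(...) above.
    btn.click(lambda: None, inputs=None, outputs=out).then(
        fn=slow_job, inputs=inp, outputs=out
    )

demo.queue()

if __name__ == "__main__":
    demo.launch()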
@@ -451,7 +343,9 @@ This demo showcases the capabilities of [TheMistoAI/MistoLine](https://huggingfa
                 3,
             ],
         ],
-        cache_examples=
+        cache_examples="lazy",
     )
 
-
+
+demo.queue(api_open=True)
+demo.launch(show_api=True)
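Reviewer note: with demo.queue(api_open=True) and demo.launch(show_api=True) the rebuilt Space exposes its API page. One quick way to see what ends up callable from the outside is shown below; the Space id is a placeholder, since this diff does not name the Space.

from gradio_client import Client

client = Client("OWNER/SPACE_NAME")  # placeholder Space id, replace with the real one
client.view_api()  # lists the exposed endpoints and their parameters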