radames committed
Commit a510503
1 Parent(s): 39e5a08
.gitignore CHANGED
@@ -1,3 +1,4 @@
 **/midas_models/
 stable-diffusion-2-depth/
-__pycache__
+__pycache__
+gradio_cached_examples
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Stablediffusion Depth2img
-emoji: 💩
+emoji: 🧊🖼
 colorFrom: green
 colorTo: yellow
 sdk: gradio
stablediffusion/examples/baby.jpg ADDED
stablediffusion/examples/gol.jpg ADDED
stablediffusion/scripts/gradio/depth2img.py CHANGED
@@ -158,7 +158,7 @@ sampler = initialize_model(sys.argv[1], sys.argv[2])
 block = gr.Blocks().queue()
 with block:
     with gr.Row():
-        gr.Markdown("## Stable Diffusion Depth2Img")
+        gr.Markdown("## Stable Diffusion 2 Depth2Img")
 
     with gr.Row():
         with gr.Column():
@@ -187,9 +187,23 @@ with block:
         with gr.Column():
             gallery = gr.Gallery(label="Generated images", show_label=False).style(
                 grid=[2], height="auto")
-
+            gr.Examples(
+                examples=[
+                    ["./examples/baby.jpg",
+                     "high definition photo of a baby astronaut space walking at the international space station with earth seeing from above in the background",
+                     50, 1, 9.0, 123123123, 0.0, 0.8],
+                    ["./examples/gol.jpg",
+                     "professional photo of a Elmo jumping between two high rises, beautiful colorful city landscape in the background",
+                     50, 1, 9.0, 1734133747, 0.0, 0.9]
+                ],
+                inputs=[input_image, prompt, ddim_steps,
+                        num_samples, scale, seed, eta, strength],
+                outputs=[gallery],
+                fn=predict,
+                cache_examples=True,
+            )
     run_button.click(fn=predict, inputs=[
         input_image, prompt, ddim_steps, num_samples, scale, seed, eta, strength], outputs=[gallery])
 
 
-block.launch()
+block.launch(show_api=False)
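
For readers unfamiliar with Gradio's example caching, the minimal sketch below shows the pattern this commit adopts: gr.Examples with cache_examples=True runs the supplied fn over each example row once at startup and stores the rendered outputs under gradio_cached_examples/, which is the directory newly ignored in .gitignore above. The stub predict(), the components, and the example path in the sketch are illustrative assumptions, not the Space's actual depth2img pipeline.

import gradio as gr

def predict(image, prompt):
    # Stand-in for the real depth-conditioned diffusion pipeline;
    # it simply echoes the input image back as a one-item gallery.
    return [image]

block = gr.Blocks().queue()
with block:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button("Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False)
            # cache_examples=True makes Gradio call fn on every example row at
            # launch and save the results to gradio_cached_examples/, so
            # visitors clicking an example get a precomputed result instantly.
            gr.Examples(
                examples=[["./examples/baby.jpg", "a baby astronaut space walking"]],  # hypothetical path/prompt
                inputs=[input_image, prompt],
                outputs=[gallery],
                fn=predict,
                cache_examples=True,
            )
    run_button.click(fn=predict, inputs=[input_image, prompt], outputs=[gallery])

# show_api=False hides the auto-generated API docs page, matching the change above.
block.launch(show_api=False)

The trade-off is one full pipeline run per example at startup in exchange for instant example clicks afterwards, which is why the cached outputs are kept out of version control.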