Update
- README.md (+1, -2)
- app_generated_image.py (+2, -4)
- app_real_image.py (+2, -4)
- requirements.txt (+2, -2)
README.md
CHANGED
@@ -4,8 +4,7 @@ emoji: 🐢
 colorFrom: blue
 colorTo: pink
 sdk: gradio
-sdk_version:
-python_version: 3.10.11
+sdk_version: 4.36.1
 app_file: app.py
 pinned: false
 suggested_hardware: a10g-small
app_generated_image.py
CHANGED
@@ -2,7 +2,6 @@

 from __future__ import annotations

-import os
 import pathlib
 import shlex
 import subprocess
@@ -122,7 +121,7 @@ def process_example(source_prompt: str, seed: int, translation_prompt: str) -> t

 def create_prompt_demo() -> gr.Blocks:
     with gr.Blocks() as demo:
-        with gr.
+        with gr.Group():
             gr.Markdown("Step 1 (This step will take about 1.5 minutes on A10G.)")
             with gr.Row():
                 with gr.Column():
@@ -137,7 +136,7 @@ def create_prompt_demo() -> gr.Blocks:
             with gr.Column():
                 generated_image = gr.Image(label="Generated image", type="filepath")
         exp_name = gr.Text(visible=False)
-        with gr.
+        with gr.Group():
             gr.Markdown("Step 2 (This step will take about 1.5 minutes on A10G.)")
             with gr.Row():
                 with gr.Column():
@@ -177,7 +176,6 @@ def create_prompt_demo() -> gr.Blocks:
                 result,
             ],
             fn=process_example,
-            cache_examples=os.getenv("CACHE_EXAMPLES"),
         )

         extract_feature_button.click(
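Both layout hunks above replace a truncated "with gr." container (presumably the Gradio 3.x gr.Box, which no longer exists in Gradio 4) with gr.Group(). Below is a minimal sketch of the resulting two-step layout under Gradio 4.x; the component names, labels, and the placeholder callback are illustrative stand-ins, not copied from the full file.

# Minimal sketch, assuming Gradio 4.x, of the gr.Group()-based layout used above.
# gr.Box was removed in Gradio 4, so grouped sections are expressed with gr.Group.
# Component names, labels, and the placeholder callback are illustrative only.
import gradio as gr
import numpy as np
from PIL import Image


def generate(prompt: str) -> str:
    # Placeholder for the real Step 1 pipeline: writes a tiny solid-color image
    # and returns its path, matching the Image(type="filepath") output below.
    path = "generated.png"  # hypothetical file name
    Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8)).save(path)
    return path


def create_prompt_demo() -> gr.Blocks:
    with gr.Blocks() as demo:
        with gr.Group():  # Step 1 section
            gr.Markdown("Step 1")
            with gr.Row():
                with gr.Column():
                    source_prompt = gr.Textbox(label="Source prompt")
                    generate_button = gr.Button("Generate")
                with gr.Column():
                    generated_image = gr.Image(label="Generated image", type="filepath")
        with gr.Group():  # Step 2 section
            gr.Markdown("Step 2")
            with gr.Row():
                with gr.Column():
                    translation_prompt = gr.Textbox(label="Translation prompt")
                with gr.Column():
                    result = gr.Image(label="Result", type="filepath")
        generate_button.click(fn=generate, inputs=source_prompt, outputs=generated_image)
    return demo


if __name__ == "__main__":
    create_prompt_demo().launch()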
app_real_image.py
CHANGED
@@ -3,7 +3,6 @@
 from __future__ import annotations

 import hashlib
-import os
 import pathlib
 import shlex
 import subprocess
@@ -109,7 +108,7 @@ def process_example(image: str, translation_prompt: str, negative_prompt: str) -

 def create_real_image_demo():
     with gr.Blocks() as demo:
-        with gr.
+        with gr.Group():
             gr.Markdown("Step 1 (This step will take about 5 minutes on A10G.)")
             with gr.Row():
                 with gr.Column():
@@ -118,7 +117,7 @@ def create_real_image_demo():
             with gr.Column():
                 reconstructed_image = gr.Image(label="Reconstructed image", type="filepath")
         exp_name = gr.Text(visible=False)
-        with gr.
+        with gr.Group():
             gr.Markdown("Step 2 (This step will take about 1.5 minutes on A10G.)")
             with gr.Row():
                 with gr.Column():
@@ -172,7 +171,6 @@ def create_real_image_demo():
                 result,
             ],
             fn=process_example,
-            cache_examples=os.getenv("CACHE_EXAMPLES"),
         )

         extract_feature_button.click(
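In both apps the removed cache_examples=os.getenv("CACHE_EXAMPLES") keyword was the only use of os, which is why the import os lines are dropped as well. The keyword appears to belong to a gr.Examples block; the sketch below shows that wiring after the change, with an illustrative example row, a hypothetical sample file, and a simplified process_example.

# Sketch of the examples wiring after the change, assuming the removed keyword
# was passed to gr.Examples. Caching is now left at Gradio's default instead of
# being driven by the CACHE_EXAMPLES environment variable. The example row,
# the simplified process_example body, and the file name are illustrative.
import gradio as gr
import numpy as np
from PIL import Image

# Create the hypothetical example image so the demo can start cleanly.
Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8)).save("sample.jpg")


def process_example(image: str, translation_prompt: str, negative_prompt: str) -> str:
    # Placeholder for the real translation pipeline; echoes the input path.
    return image


with gr.Blocks() as demo:
    image = gr.Image(label="Input image", type="filepath")
    translation_prompt = gr.Textbox(label="Translation prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    result = gr.Image(label="Result", type="filepath")
    gr.Examples(
        examples=[["sample.jpg", "a watercolor painting", ""]],  # hypothetical row
        inputs=[image, translation_prompt, negative_prompt],
        outputs=[result],
        fn=process_example,
        # cache_examples is no longer set here, so import os is not needed.
    )

if __name__ == "__main__":
    demo.launch()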
requirements.txt
CHANGED
@@ -2,8 +2,8 @@ albumentations==1.3.0
 -e git+https://github.com/openai/CLIP.git@main#egg=clip
 diffusers==0.12.1
 einops==0.6.0
-gradio==
-huggingface-hub==0.
+gradio==4.36.1
+huggingface-hub==0.23.3
 imageio==2.25.0
 imageio-ffmpeg==0.4.8
 kornia==0.6.9
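With gradio and huggingface-hub now pinned to exact versions (and the README's sdk_version bumped to match the gradio pin), a small startup check can confirm the installed packages agree with requirements.txt. A minimal sketch using only the packages' own __version__ attributes; the app itself does not include such a check.

# Sanity check that the environment matches the new pins in requirements.txt
# (gradio==4.36.1, huggingface-hub==0.23.3). Purely illustrative.
import gradio
import huggingface_hub

EXPECTED = {
    "gradio": ("4.36.1", gradio.__version__),
    "huggingface-hub": ("0.23.3", huggingface_hub.__version__),
}

for name, (expected, installed) in EXPECTED.items():
    status = "OK" if installed == expected else "MISMATCH"
    print(f"{name}: expected {expected}, installed {installed} -> {status}")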