Spaces: Running on Zero
zhiweili committed
Commit fb2eb91 • Parent(s): d60325b
change to controlnet-p2p app

Browse files:
- app.py +1 -1
- app_haircolor_img2img.py +7 -6
app.py
CHANGED
@@ -1,6 +1,6 @@
 import gradio as gr
 
-from
+from app_haircolor_img2img import create_demo as create_demo_haircolor
 
 with gr.Blocks(css="style.css") as demo:
     with gr.Tabs():
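The app.py change only swaps which module supplies the hair-color tab. A minimal sketch of how the renamed import is typically mounted inside the Blocks/Tabs layout shown above; the tab label and the final launch call are assumptions, since the commit only shows the import line:

import gradio as gr

from app_haircolor_img2img import create_demo as create_demo_haircolor

with gr.Blocks(css="style.css") as demo:
    with gr.Tabs():
        # Tab label is assumed; the diff does not show how the tab is declared.
        with gr.Tab(label="Hair Color"):
            create_demo_haircolor()

# Assumed entry point; not part of this commit.
demo.queue().launch()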
app_haircolor_img2img.py
CHANGED
@@ -29,8 +29,7 @@ BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
-DEFAULT_EDIT_PROMPT = "change to blue
-DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch ,duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, poorly drawn face, bad face, fused face, ugly face, worst face, asymmetrical, unrealistic skin texture, bad proportions, out of frame, poorly drawn hands, cloned face, double face"
+DEFAULT_EDIT_PROMPT = "change hair to blue"
 
 DEFAULT_CATEGORY = "hair"
 
@@ -75,6 +74,7 @@ def image_to_image(
     seed: int,
     num_steps: int,
     guidance_scale: float,
+    image_guidance_scale: float,
     generate_size: int,
     cond_scale1: float = 1.2,
     cond_scale2: float = 1.2,
@@ -82,19 +82,19 @@ def image_to_image(
     run_task_time = 0
     time_cost_str = ''
     run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
-
+    lineart_image = lineart_detector(input_image, 384, generate_size)
 
-    cond_image = [input_image,
+    cond_image = [input_image, lineart_image]
 
     generator = torch.Generator(device=DEVICE).manual_seed(seed)
     generated_image = basepipeline(
         generator=generator,
         prompt=edit_prompt,
-        negative_prompt=DEFAULT_NEGATIVE_PROMPT,
         image=cond_image,
         height=generate_size,
         width=generate_size,
         guidance_scale=guidance_scale,
+        image_guidance_scale=image_guidance_scale,
         num_inference_steps=num_steps,
         controlnet_conditioning_scale=[cond_scale1, cond_scale2],
     ).images[0]
@@ -134,6 +134,7 @@ def create_demo() -> gr.Blocks:
     with gr.Column():
         num_steps = gr.Slider(minimum=1, maximum=100, value=20, step=1, label="Num Steps")
         guidance_scale = gr.Slider(minimum=0, maximum=30, value=5, step=0.5, label="Guidance Scale")
+        image_guidance_scale = gr.Slider(minimum=0, maximum=30, value=1.5, step=0.1, label="Image Guidance Scale")
     with gr.Column():
         with gr.Accordion("Advanced Options", open=False):
             mask_expansion = gr.Number(label="Mask Expansion", value=50, visible=True)
@@ -160,7 +161,7 @@ def create_demo() -> gr.Blocks:
     outputs=[origin_area_image, croper],
 ).success(
     fn=image_to_image,
-    inputs=[origin_area_image, edit_prompt,seed, num_steps, guidance_scale, generate_size, cond_scale1, cond_scale2],
+    inputs=[origin_area_image, edit_prompt,seed, num_steps, guidance_scale, image_guidance_scale, generate_size, cond_scale1, cond_scale2],
     outputs=[generated_image, generated_cost],
 ).success(
     fn=restore_result,
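For reference, the new lineart_image conditioning input added in image_to_image is the kind of edge map produced by a lineart annotator. A minimal standalone sketch of how lineart_detector could be created and invoked, assuming controlnet_aux's LineartDetector and the lllyasviel/Annotators checkpoint (the diff does not show how the detector is constructed, so these names are assumptions); keyword arguments are used here where the diff passes 384 and generate_size positionally:

# Hypothetical construction of the lineart_detector used by image_to_image above.
from PIL import Image
from controlnet_aux import LineartDetector  # assumed annotator library

lineart_detector = LineartDetector.from_pretrained("lllyasviel/Annotators")  # assumed checkpoint

input_image = Image.open("portrait.jpg").convert("RGB")  # any RGB test image
generate_size = 512  # matches the app's generate size setting (value assumed)

# Mirrors lineart_detector(input_image, 384, generate_size) from the diff,
# assuming the two positional values map to detect/image resolution.
lineart_image = lineart_detector(
    input_image,
    detect_resolution=384,
    image_resolution=generate_size,
)
lineart_image.save("lineart.png")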
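The other substantive change is the new image_guidance_scale parameter and slider (default 1.5), the knob exposed by InstructPix2Pix-style ("p2p") pipelines to control how closely the output follows the input image. The exact ControlNet + pix2pix pipeline class behind basepipeline is not shown in this commit, so the sketch below instead uses the plain diffusers StableDiffusionInstructPix2PixPipeline, which accepts the same prompt / guidance_scale / image_guidance_scale arguments but not the ControlNet conditioning list:

# Illustration only: plain InstructPix2Pix, without the ControlNet conditioning the app uses.
import torch
from PIL import Image
from diffusers import StableDiffusionInstructPix2PixPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

input_image = Image.open("portrait.jpg").convert("RGB")

result = pipe(
    prompt="change hair to blue",   # DEFAULT_EDIT_PROMPT from the diff
    image=input_image,
    num_inference_steps=20,         # Num Steps slider default
    guidance_scale=5.0,             # Guidance Scale slider default
    image_guidance_scale=1.5,       # new Image Guidance Scale slider default
).images[0]
result.save("blue_hair.png")

Raising image_guidance_scale keeps the edit closer to the source photo, which is why it is exposed as a separate slider from the text guidance scale.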