Spaces: Running on Zero
zhiweili committed
Commit • d60325b
1 Parent(s): f3d17f0
reset p2p app
Browse files: app_haircolor_pix2pix.py (+3 -27)
app_haircolor_pix2pix.py CHANGED
@@ -11,10 +11,8 @@ from segment_utils import(
     restore_result,
 )
 from diffusers import (
-    DiffusionPipeline,
     StableDiffusionInstructPix2PixPipeline,
     EulerAncestralDiscreteScheduler,
-    T2IAdapter,
 )
 
 from controlnet_aux import (
@@ -28,25 +26,15 @@ BASE_MODEL = "timbrooks/instruct-pix2pix"
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
-DEFAULT_EDIT_PROMPT = "
+DEFAULT_EDIT_PROMPT = "turn hair into blue"
 DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch ,duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, poorly drawn face, bad face, fused face, ugly face, worst face, asymmetrical, unrealistic skin texture, bad proportions, out of frame, poorly drawn hands, cloned face, double face"
 
 DEFAULT_CATEGORY = "hair"
 
-
-
-adapter = T2IAdapter.from_pretrained(
-    "TencentARC/t2iadapter_canny_sd15v2",
-    torch_dtype=torch.float16,
-    varient="fp16",
-)
-
-basepipeline = DiffusionPipeline.from_pretrained(
+basepipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
     BASE_MODEL,
     torch_dtype=torch.float16,
     use_safetensors=True,
-    adapter=adapter,
-    custom_pipeline="./pipelines/pipeline_sd_adapter_p2p.py",
 )
 
 basepipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(basepipeline.scheduler.config)
@@ -64,30 +52,19 @@ def image_to_image(
     guidance_scale: float,
     image_guidance_scale: float,
     generate_size: int,
-    cond_scale1: float = 1.2,
 ):
     run_task_time = 0
     time_cost_str = ''
     run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
-    canny_image = canny_detector(input_image)
-    canny_image = canny_image.convert("L")
-
-    cond_image = canny_image
-    cond_scale = cond_scale1
 
     generator = torch.Generator(device=DEVICE).manual_seed(seed)
     generated_image = basepipeline(
         generator=generator,
         prompt=edit_prompt,
-        negative_prompt=DEFAULT_NEGATIVE_PROMPT,
         image=input_image,
-        height=generate_size,
-        width=generate_size,
         guidance_scale=guidance_scale,
         image_guidance_scale=image_guidance_scale,
         num_inference_steps=num_steps,
-        adapter_image=cond_image,
-        adapter_conditioning_scale=cond_scale,
     ).images[0]
 
     run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
@@ -122,7 +99,6 @@ def create_demo() -> gr.Blocks:
                 mask_dilation = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Mask Dilation")
                 seed = gr.Number(label="Seed", value=8)
                 category = gr.Textbox(label="Category", value=DEFAULT_CATEGORY, visible=False)
-                cond_scale1 = gr.Slider(minimum=0, maximum=3, value=1.2, step=0.1, label="Cond_scale1")
             g_btn = gr.Button("Edit Image")
 
         with gr.Row():
@@ -141,7 +117,7 @@ def create_demo() -> gr.Blocks:
         outputs=[origin_area_image, croper],
     ).success(
         fn=image_to_image,
-        inputs=[origin_area_image, edit_prompt,seed, num_steps, guidance_scale, image_guidance_scale, generate_size, cond_scale1],
+        inputs=[origin_area_image, edit_prompt,seed, num_steps, guidance_scale, image_guidance_scale, generate_size],
         outputs=[generated_image, generated_cost],
     ).success(
         fn=restore_result,
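For reference, a minimal sketch of how the app loads and calls the plain InstructPix2Pix pipeline after this reset. The model id, scheduler swap, and call arguments mirror the diff above; the image path and the specific step/guidance values are illustrative placeholders, not values taken from the Space, and in the Space itself the hair region is first cropped out via segment_utils and the result restored afterwards.

import torch
from PIL import Image
from diffusers import (
    StableDiffusionInstructPix2PixPipeline,
    EulerAncestralDiscreteScheduler,
)

BASE_MODEL = "timbrooks/instruct-pix2pix"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Plain InstructPix2Pix pipeline; the T2IAdapter and custom pipeline are gone after the reset.
basepipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,  # fp16 weights assume a CUDA device, as on the ZeroGPU Space
    use_safetensors=True,
)
basepipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(basepipeline.scheduler.config)
basepipeline = basepipeline.to(DEVICE)

# Illustrative input image; hypothetical path, not part of the repo.
input_image = Image.open("face.jpg").convert("RGB")
generator = torch.Generator(device=DEVICE).manual_seed(8)

generated_image = basepipeline(
    generator=generator,
    prompt="turn hair into blue",   # DEFAULT_EDIT_PROMPT in the app
    image=input_image,
    guidance_scale=7.5,             # illustrative; exposed as a slider in the UI
    image_guidance_scale=1.5,       # illustrative; exposed as a slider in the UI
    num_inference_steps=20,         # illustrative; exposed as a slider in the UI
).images[0]

generated_image.save("result.jpg")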