zhiweili committed on
Commit
85a227c
1 Parent(s): 518257b

change to app_haircolor_inpaint_15

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. app_haircolor_inpaint_15.py +13 -13
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
 
3
- from app_haircolor_pix2pix_sdxl import create_demo as create_demo_haircolor
4
 
5
  with gr.Blocks(css="style.css") as demo:
6
  with gr.Tabs():
 
1
  import gradio as gr
2
 
3
+ from app_haircolor_inpaint_15 import create_demo as create_demo_haircolor
4
 
5
  with gr.Blocks(css="style.css") as demo:
6
  with gr.Tabs():
app_haircolor_inpaint_15.py CHANGED
@@ -24,13 +24,13 @@ from controlnet_aux import (
24
  HEDdetector,
25
  )
26
 
27
- # BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-v1-5"
28
  # BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-inpainting"
29
- BASE_MODEL = "SG161222/Realistic_Vision_V2.0"
30
 
31
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
32
 
33
- DEFAULT_EDIT_PROMPT = "a woman, blue hair, high detailed"
34
  DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch ,duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, poorly drawn face, bad face, fused face, ugly face, worst face, asymmetrical, unrealistic skin texture, bad proportions, out of frame, poorly drawn hands, cloned face, double face"
35
 
36
  DEFAULT_CATEGORY = "hair"
@@ -47,11 +47,11 @@ hed_detector = hed_detector.to(DEVICE)
47
 
48
  controlnet = [
49
  ControlNetModel.from_pretrained(
50
- "lllyasviel/control_v11p_sd15_lineart",
51
  torch_dtype=torch.float16,
52
  ),
53
  ControlNetModel.from_pretrained(
54
- "lllyasviel/control_v11p_sd15_softedge",
55
  torch_dtype=torch.float16,
56
  ),
57
  ]
@@ -59,7 +59,7 @@ controlnet = [
59
  basepipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
60
  BASE_MODEL,
61
  torch_dtype=torch.float16,
62
- # use_safetensors=True,
63
  controlnet=controlnet,
64
  )
65
  # basepipeline.scheduler = DDIMScheduler.from_config(basepipeline.scheduler.config)
@@ -78,8 +78,8 @@ def image_to_image(
78
  num_steps: int,
79
  guidance_scale: float,
80
  generate_size: int,
81
- cond_scale1: float = 1.2,
82
- cond_scale2: float = 1.2,
83
  ):
84
  run_task_time = 0
85
  time_cost_str = ''
@@ -87,8 +87,8 @@ def image_to_image(
87
  # canny_image = canny_detector(input_image, int(generate_size*1), generate_size)
88
  lineart_image = lineart_detector(input_image, 384, generate_size)
89
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
90
- pidiNet_image = pidiNet_detector(input_image, 512, generate_size)
91
- control_image = [lineart_image, pidiNet_image]
92
 
93
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
94
  generated_image = basepipeline(
@@ -138,12 +138,12 @@ def create_demo() -> gr.Blocks:
138
  edit_prompt = gr.Textbox(lines=1, label="Edit Prompt", value=DEFAULT_EDIT_PROMPT)
139
  generate_size = gr.Number(label="Generate Size", value=512)
140
  with gr.Column():
141
- num_steps = gr.Slider(minimum=1, maximum=100, value=25, step=1, label="Num Steps")
142
  guidance_scale = gr.Slider(minimum=0, maximum=30, value=5, step=0.5, label="Guidance Scale")
143
  with gr.Column():
144
  with gr.Accordion("Advanced Options", open=False):
145
- cond_scale1 = gr.Slider(minimum=0, maximum=3, value=1.2, step=0.1, label="Lineart Scale")
146
- cond_scale2 = gr.Slider(minimum=0, maximum=3, value=1.2, step=0.1, label="PidiNet Scale")
147
  mask_expansion = gr.Number(label="Mask Expansion", value=50, visible=True)
148
  mask_dilation = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Mask Dilation")
149
  seed = gr.Number(label="Seed", value=8)
 
24
  HEDdetector,
25
  )
26
 
27
+ BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-v1-5"
28
  # BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-inpainting"
29
+ # BASE_MODEL = "SG161222/Realistic_Vision_V2.0"
30
 
31
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
32
 
33
+ DEFAULT_EDIT_PROMPT = "change hair to linen blonde"
34
  DEFAULT_NEGATIVE_PROMPT = "worst quality, normal quality, low quality, low res, blurry, text, watermark, logo, banner, extra digits, cropped, jpeg artifacts, signature, username, error, sketch ,duplicate, ugly, monochrome, horror, geometry, mutation, disgusting, poorly drawn face, bad face, fused face, ugly face, worst face, asymmetrical, unrealistic skin texture, bad proportions, out of frame, poorly drawn hands, cloned face, double face"
35
 
36
  DEFAULT_CATEGORY = "hair"
 
47
 
48
  controlnet = [
49
  ControlNetModel.from_pretrained(
50
+ "lllyasviel/control_v11e_sd15_ip2p",
51
  torch_dtype=torch.float16,
52
  ),
53
  ControlNetModel.from_pretrained(
54
+ "lllyasviel/control_v11p_sd15_lineart",
55
  torch_dtype=torch.float16,
56
  ),
57
  ]
 
59
  basepipeline = StableDiffusionControlNetInpaintPipeline.from_pretrained(
60
  BASE_MODEL,
61
  torch_dtype=torch.float16,
62
+ use_safetensors=True,
63
  controlnet=controlnet,
64
  )
65
  # basepipeline.scheduler = DDIMScheduler.from_config(basepipeline.scheduler.config)
 
78
  num_steps: int,
79
  guidance_scale: float,
80
  generate_size: int,
81
+ cond_scale1: float = 1.0,
82
+ cond_scale2: float = 0.6,
83
  ):
84
  run_task_time = 0
85
  time_cost_str = ''
 
87
  # canny_image = canny_detector(input_image, int(generate_size*1), generate_size)
88
  lineart_image = lineart_detector(input_image, 384, generate_size)
89
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
90
+ # pidiNet_image = pidiNet_detector(input_image, 512, generate_size)
91
+ control_image = [lineart_image, input_image]
92
 
93
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
94
  generated_image = basepipeline(
 
138
  edit_prompt = gr.Textbox(lines=1, label="Edit Prompt", value=DEFAULT_EDIT_PROMPT)
139
  generate_size = gr.Number(label="Generate Size", value=512)
140
  with gr.Column():
141
+ num_steps = gr.Slider(minimum=1, maximum=100, value=20, step=1, label="Num Steps")
142
  guidance_scale = gr.Slider(minimum=0, maximum=30, value=5, step=0.5, label="Guidance Scale")
143
  with gr.Column():
144
  with gr.Accordion("Advanced Options", open=False):
145
+ cond_scale1 = gr.Slider(minimum=0, maximum=3, value=1, step=0.1, label="Cond Scale1")
146
+ cond_scale2 = gr.Slider(minimum=0, maximum=3, value=0.6, step=0.1, label="Cond Scale2")
147
  mask_expansion = gr.Number(label="Mask Expansion", value=50, visible=True)
148
  mask_dilation = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Mask Dilation")
149
  seed = gr.Number(label="Seed", value=8)