zhiweili committed on
Commit
bca475d
1 Parent(s): 91bb531

test control img2img

Browse files
Files changed (3) hide show
  1. app.py +1 -1
  2. app_civitai.py +6 -0
  3. app_haircolor_img2img.py +10 -7
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
 
3
- from app_haircolor_refiner import create_demo as create_demo_haircolor
4
 
5
  with gr.Blocks(css="style.css") as demo:
6
  with gr.Tabs():
 
1
  import gradio as gr
2
 
3
+ from app_haircolor_img2img import create_demo as create_demo_haircolor
4
 
5
  with gr.Blocks(css="style.css") as demo:
6
  with gr.Tabs():
app_civitai.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
4
+
5
+ if not os.path.exists('juggernaut_aftermath-inpainting.safetensors'):
6
+ os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .")
app_haircolor_img2img.py CHANGED
@@ -10,7 +10,7 @@ from segment_utils import(
10
  restore_result,
11
  )
12
  from diffusers import (
13
- StableDiffusionControlNetPipeline,
14
  ControlNetModel,
15
  DDIMScheduler,
16
  DPMSolverMultistepScheduler,
@@ -45,16 +45,16 @@ hed_detector = hed_detector.to(DEVICE)
45
 
46
  controlnet = [
47
  ControlNetModel.from_pretrained(
48
- "lllyasviel/control_v11e_sd15_ip2p",
49
  torch_dtype=torch.float16,
50
  ),
51
  ControlNetModel.from_pretrained(
52
- "lllyasviel/control_v11p_sd15_lineart",
53
  torch_dtype=torch.float16,
54
  ),
55
  ]
56
 
57
- basepipeline = StableDiffusionControlNetPipeline.from_pretrained(
58
  BASE_MODEL,
59
  torch_dtype=torch.float16,
60
  use_safetensors=True,
@@ -82,15 +82,18 @@ def image_to_image(
82
  run_task_time = 0
83
  time_cost_str = ''
84
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
85
- lineart_image = lineart_detector(input_image, 384, generate_size)
 
 
86
 
87
- cond_image = [input_image, lineart_image]
88
 
89
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
90
  generated_image = basepipeline(
91
  generator=generator,
92
  prompt=edit_prompt,
93
- image=cond_image,
 
94
  height=generate_size,
95
  width=generate_size,
96
  guidance_scale=guidance_scale,
 
10
  restore_result,
11
  )
12
  from diffusers import (
13
+ StableDiffusionControlNetImg2ImgPipeline,
14
  ControlNetModel,
15
  DDIMScheduler,
16
  DPMSolverMultistepScheduler,
 
45
 
46
  controlnet = [
47
  ControlNetModel.from_pretrained(
48
+ "lllyasviel/control_v11p_sd15_lineart",
49
  torch_dtype=torch.float16,
50
  ),
51
  ControlNetModel.from_pretrained(
52
+ "lllyasviel/control_v11p_sd15_softedge",
53
  torch_dtype=torch.float16,
54
  ),
55
  ]
56
 
57
+ basepipeline = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
58
  BASE_MODEL,
59
  torch_dtype=torch.float16,
60
  use_safetensors=True,
 
82
  run_task_time = 0
83
  time_cost_str = ''
84
  run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
85
+ lineart_image = lineart_detector(input_image, 768, generate_size)
86
+
87
+ pidinet_image = pidiNet_detector(input_image, 768, generate_size)
88
 
89
+ cond_image = [lineart_image, pidinet_image]
90
 
91
  generator = torch.Generator(device=DEVICE).manual_seed(seed)
92
  generated_image = basepipeline(
93
  generator=generator,
94
  prompt=edit_prompt,
95
+ image=input_image,
96
+ control_image=cond_image,
97
  height=generate_size,
98
  width=generate_size,
99
  guidance_scale=guidance_scale,