kadirnar commited on
Commit
9ae63a0
1 Parent(s): a36b097
Files changed (3) hide show
  1. app.py +65 -2
  2. requirements.txt +2 -1
  3. utils/inpaint.py +53 -0
app.py CHANGED
@@ -1,6 +1,6 @@
1
  from utils.image2image import stable_diffusion_img2img
2
  from utils.text2image import stable_diffusion_text2img
3
-
4
  import gradio as gr
5
 
6
  stable_model_list = [
@@ -10,6 +10,12 @@ stable_model_list = [
10
  "stabilityai/stable-diffusion-2-1",
11
  "stabilityai/stable-diffusion-2-1-base"
12
  ]
 
 
 
 
 
 
13
  stable_prompt_list = [
14
  "a photo of a man.",
15
  "a photo of a girl."
@@ -21,7 +27,7 @@ stable_negative_prompt_list = [
21
  ]
22
  app = gr.Blocks()
23
  with app:
24
- gr.Markdown("# **<h2 align='center'>Stable Diffusion WebUI<h2>**")
25
  gr.Markdown(
26
  """
27
  <h5 style='text-align: center'>
@@ -127,6 +133,50 @@ with app:
127
 
128
  image2image_predict = gr.Button(value='Generator')
129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
  with gr.Tab('Generator'):
132
  with gr.Column():
@@ -159,4 +209,17 @@ with app:
159
  outputs = [output_image],
160
  )
161
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
  app.launch()
 
1
  from utils.image2image import stable_diffusion_img2img
2
  from utils.text2image import stable_diffusion_text2img
3
+ from utils.inpaint import stable_diffusion_inpaint
4
  import gradio as gr
5
 
6
  stable_model_list = [
 
10
  "stabilityai/stable-diffusion-2-1",
11
  "stabilityai/stable-diffusion-2-1-base"
12
  ]
13
+
14
+ stable_inpiant_model_list = [
15
+ "stabilityai/stable-diffusion-2-inpainting",
16
+ "runwayml/stable-diffusion-inpainting"
17
+ ]
18
+
19
  stable_prompt_list = [
20
  "a photo of a man.",
21
  "a photo of a girl."
 
27
  ]
28
  app = gr.Blocks()
29
  with app:
30
+ gr.Markdown("# **<h2 align='center'>Stable Diffusion + ControlNet WebUI<h2>**")
31
  gr.Markdown(
32
  """
33
  <h5 style='text-align: center'>
 
133
 
134
  image2image_predict = gr.Button(value='Generator')
135
 
136
+ with gr.Tab('Inpaint'):
137
+ inpaint_image_file = gr.Image(
138
+ source="upload",
139
+ type="numpy",
140
+ tool="sketch",
141
+ elem_id="source_container"
142
+ )
143
+
144
+ inpaint_model_id = gr.Dropdown(
145
+ choices=stable_inpiant_model_list,
146
+ value=stable_inpiant_model_list[0],
147
+ label='Inpaint Model Id'
148
+ )
149
+
150
+ inpaint_prompt = gr.Textbox(
151
+ lines=1,
152
+ value=stable_prompt_list[0],
153
+ label='Prompt'
154
+ )
155
+
156
+ inpaint_negative_prompt = gr.Textbox(
157
+ lines=1,
158
+ value=stable_negative_prompt_list[0],
159
+ label='Negative Prompt'
160
+ )
161
+
162
+ with gr.Accordion("Advanced Options", open=False):
163
+ inpaint_guidance_scale = gr.Slider(
164
+ minimum=0.1,
165
+ maximum=15,
166
+ step=0.1,
167
+ value=7.5,
168
+ label='Guidance Scale'
169
+ )
170
+
171
+ inpaint_num_inference_step = gr.Slider(
172
+ minimum=1,
173
+ maximum=100,
174
+ step=1,
175
+ value=50,
176
+ label='Num Inference Step'
177
+ )
178
+
179
+ inpaint_predict = gr.Button(value='Generator')
180
 
181
  with gr.Tab('Generator'):
182
  with gr.Column():
 
209
  outputs = [output_image],
210
  )
211
 
212
+ inpaint_predict.click(
213
+ fn = stable_diffusion_inpaint,
214
+ inputs = [
215
+ inpaint_image_file,
216
+ inpaint_model_id,
217
+ inpaint_prompt,
218
+ inpaint_negative_prompt,
219
+ inpaint_guidance_scale,
220
+ inpaint_num_inference_step,
221
+ ],
222
+ outputs = [output_image],
223
+ )
224
+
225
  app.launch()
requirements.txt CHANGED
@@ -2,4 +2,5 @@ transformers
2
  bitsandbytes==0.35.0
3
  xformers
4
  controlnet_aux
5
- diffusers
 
 
2
  bitsandbytes==0.35.0
3
  xformers
4
  controlnet_aux
5
+ diffusers
6
+ imageio
utils/inpaint.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from diffusers import DiffusionPipeline, DDIMScheduler
2
+ from PIL import Image
3
+ import imageio
4
+ import torch
5
+
6
+ # https://huggingface.co/spaces/Manjushri/SD-2.0-Inpainting-CPU/blob/main/app.py
7
+
8
def resize(height, img):
    """Load the image at path *img* and scale it to the given *height*,
    preserving the aspect ratio.

    Args:
        height: target height in pixels.
        img: filesystem path of the image to load.

    Returns:
        A new ``PIL.Image`` resized with LANCZOS resampling.
    """
    # Image.open is lazy and keeps the file handle open; use a context
    # manager so the handle is released once the pixels are read.
    with Image.open(img) as im:
        scale = height / float(im.size[1])
        width = int(float(im.size[0]) * scale)
        # resize() forces the lazy load while the file is still open.
        return im.resize((width, height), Image.Resampling.LANCZOS)
15
+
16
def img_preprocces(source_img, prompt, negative_prompt):
    """Persist the gradio sketch payload to disk and return the resized
    image/mask pair.

    Args:
        source_img: dict produced by the gradio sketch tool, holding
            ``"image"`` and ``"mask"`` numpy arrays.
        prompt: unused; kept for signature compatibility with the caller.
        negative_prompt: unused; kept for signature compatibility.

    Returns:
        Tuple ``(image, mask)`` of PIL images, both scaled to height 512.
        Side effect: writes data.png, data_mask.png, src.png and mask.png
        into the current working directory.
    """
    # Dump the raw arrays first so resize() can re-read them from disk.
    imageio.imwrite("data.png", source_img["image"])
    imageio.imwrite("data_mask.png", source_img["mask"])

    image_resized = resize(512, "data.png")
    image_resized.save("src.png")

    mask_resized = resize(512, "data_mask.png")
    mask_resized.save("mask.png")

    return image_resized, mask_resized
24
+
25
def stable_diffusion_inpaint(
    image_path: dict,
    model_path: str,
    prompt: str,
    negative_prompt: str,
    guidance_scale: float,
    num_inference_step: int,
):
    """Run a Stable Diffusion inpainting pipeline on a sketched image.

    Args:
        image_path: gradio sketch-tool payload — a dict with ``"image"``
            and ``"mask"`` numpy arrays (not a filesystem path, despite
            the name; it is passed straight to ``img_preprocces``).
        model_path: HuggingFace Hub id of an inpainting checkpoint.
        prompt: text prompt guiding the generation.
        negative_prompt: text the generation is steered away from.
        guidance_scale: classifier-free guidance strength (e.g. 7.5).
        num_inference_step: number of denoising steps.

    Returns:
        The list of generated PIL images from the pipeline.
    """
    image, mask_image = img_preprocces(image_path, prompt, negative_prompt)

    pipe = DiffusionPipeline.from_pretrained(
        model_path,
        # NOTE(review): "fp16" branch revisions are deprecated in newer
        # diffusers releases — confirm the hub repos still carry them.
        revision="fp16",
        torch_dtype=torch.float16,
    )
    pipe.to('cuda')
    # Replace the default scheduler with DDIM, configured from the
    # pipeline's own scheduler config.
    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
    pipe.enable_xformers_memory_efficient_attention()

    output = pipe(
        prompt=prompt,
        image=image,
        mask_image=mask_image,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_step,
        guidance_scale=guidance_scale,
    ).images

    return output