tonyassi committed
Commit 8dcef4c
Parent: 19a049d

Update app.py

Files changed (1): app.py (+10, -3)
app.py CHANGED
@@ -6,7 +6,7 @@ from PIL import Image, ImageOps
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 pipeline = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("cuda")
 
-def generate(image_editor, prompt):
+def generate(image_editor, prompt, neg_prompt, strength, guidance):
     image = image_editor['background'].convert('RGB')
     mask = Image.new("RGBA", image_editor["layers"][0].size, "WHITE")
     mask.paste(image_editor["layers"][0], (0, 0), image_editor["layers"][0])
@@ -15,7 +15,14 @@ def generate(image_editor, prompt):
     image.thumbnail((1024, 1024))
     mask.thumbnail((1024, 1024))
 
-    final_image = pipeline(prompt=prompt, image=image, mask_image=mask).images[0]
+    final_image = pipeline(prompt=prompt,
+                           image=image,
+                           mask_image=mask,
+                           width=image.width,
+                           height=image.height,
+                           num_inference_steps=50,
+                           strength=strength,
+                           guidance_scale=guidance).images[0]
 
     return image_editor, image, mask, final_image
 
@@ -44,6 +51,6 @@ with gr.Blocks() as demo:
     out2 = gr.Image()
     out3 = gr.Image()
 
-    generate_button.click(fn=generate, inputs=[sketch_pad,prompt], outputs=[sketch_pad, out1, out2, out3])
+    generate_button.click(fn=generate, inputs=[sketch_pad,prompt, neg_prompt, strength_slider, guidance_slider], outputs=[sketch_pad, out1, out2, out3])
 
 demo.launch()
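
For reference, below is a minimal, self-contained sketch of what app.py plausibly looks like after this commit. The pipeline setup, the generate() body, and the click() wiring are copied from the diff above; the import statements are inferred from that code, and the Gradio component definitions (sketch_pad, prompt, neg_prompt, strength_slider, guidance_slider, generate_button, out1-out3) are not shown in the commit, so their types, labels, and slider ranges are assumptions rather than the author's actual UI code.

# Sketch of app.py after this commit. Parts not visible in the diff are assumptions.
import torch
import gradio as gr
from PIL import Image, ImageOps
from diffusers import AutoPipelineForInpainting, AutoencoderKL

# fp16-safe VAE plus the SDXL inpainting pipeline, as in the diff's context lines
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipeline = AutoPipelineForInpainting.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    vae=vae, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")

def generate(image_editor, prompt, neg_prompt, strength, guidance):
    # The ImageEditor background is the source image; the drawn layer becomes the inpainting mask.
    image = image_editor['background'].convert('RGB')
    mask = Image.new("RGBA", image_editor["layers"][0].size, "WHITE")
    mask.paste(image_editor["layers"][0], (0, 0), image_editor["layers"][0])
    # (two lines of the original file fall between the diff hunks and are not shown here)

    image.thumbnail((1024, 1024))
    mask.thumbnail((1024, 1024))

    # Note: neg_prompt is accepted here but not forwarded to the pipeline in this commit.
    final_image = pipeline(prompt=prompt,
                           image=image,
                           mask_image=mask,
                           width=image.width,
                           height=image.height,
                           num_inference_steps=50,
                           strength=strength,
                           guidance_scale=guidance).images[0]

    return image_editor, image, mask, final_image

with gr.Blocks() as demo:
    sketch_pad = gr.ImageEditor(type="pil", label="Image")        # assumed component type
    prompt = gr.Textbox(label="Prompt")                           # assumed
    neg_prompt = gr.Textbox(label="Negative prompt")              # assumed
    strength_slider = gr.Slider(0.0, 1.0, value=0.99, label="Strength")         # assumed range/default
    guidance_slider = gr.Slider(1.0, 15.0, value=7.5, label="Guidance scale")   # assumed range/default
    generate_button = gr.Button("Generate")                       # assumed

    out1 = gr.Image()
    out2 = gr.Image()
    out3 = gr.Image()

    generate_button.click(fn=generate,
                          inputs=[sketch_pad, prompt, neg_prompt, strength_slider, guidance_slider],
                          outputs=[sketch_pad, out1, out2, out3])

demo.launch()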