Linoy Tsaban committed
Commit 066c23c
Parent: 6505e1f

Update app.py

Files changed (1)
  app.py: +4 -6
app.py CHANGED
@@ -22,8 +22,7 @@ def invert(x0, prompt_src="", num_diffusion_steps=100, cfg_scale_src = 3.5, eta
     sd_pipe.scheduler.set_timesteps(num_diffusion_steps)
 
     # vae encode image
-    with autocast("cuda"), inference_mode():
-        w0 = (sd_pipe.vae.encode(x0).latent_dist.mode() * 0.18215).float()
+    w0 = (sd_pipe.vae.encode(x0).latent_dist.mode() * 0.18215).float()
 
     # find Zs and wts - forward process
     wt, zs, wts = inversion_forward_process(sd_pipe, w0, etas=eta, prompt=prompt_src, cfg_scale=cfg_scale_src, prog_bar=True, num_inference_steps=num_diffusion_steps)
@@ -37,8 +36,7 @@ def sample(wt, zs, wts, prompt_tar="", cfg_scale_tar=15, skip=36, eta = 1):
     w0, _ = inversion_reverse_process(sd_pipe, xT=wts[skip], etas=eta, prompts=[prompt_tar], cfg_scales=[cfg_scale_tar], prog_bar=True, zs=zs[skip:])
 
     # vae decode image
-    with autocast("cuda"), inference_mode():
-        x0_dec = sd_pipe.vae.decode(1 / 0.18215 * w0).sample
+    x0_dec = sd_pipe.vae.decode(1 / 0.18215 * w0).sample
     if x0_dec.dim()<4:
         x0_dec = x0_dec[None,:,:,:]
     img = image_grid(x0_dec)
@@ -47,9 +45,9 @@ def sample(wt, zs, wts, prompt_tar="", cfg_scale_tar=15, skip=36, eta = 1):
 # load pipelines
 sd_model_id = "runwayml/stable-diffusion-v1-5"
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-sd_pipe = StableDiffusionPipeline.from_pretrained(sd_model_id).to(device)
+sd_pipe = StableDiffusionPipeline.from_pretrained(sd_model_id, torch_dtype=torch.float16).to(device)
 sd_pipe.scheduler = DDIMScheduler.from_config(sd_model_id, subfolder = "scheduler")
-sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id).to(device)
+sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id, torch_dtype=torch.float16).to(device)
 
 
 def edit(input_image, input_image_prompt='', target_prompt='', edit_prompt='',
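For context, the diff loads both pipelines directly in half precision (torch_dtype=torch.float16) rather than wrapping the VAE encode/decode calls in autocast("cuda") / inference_mode(). Below is a minimal sketch of that pattern, assuming the diffusers StableDiffusionPipeline API already used in app.py; the dummy x0 tensor and its shape are purely illustrative, and 0.18215 is the Stable Diffusion latent scaling factor taken from the code above.

# Minimal sketch, not the app's full code. Assumes a CUDA device, since the
# VAE weights are kept in fp16.
import torch
from diffusers import StableDiffusionPipeline

sd_model_id = "runwayml/stable-diffusion-v1-5"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# torch_dtype=torch.float16 loads all weights in fp16 up front, so no
# autocast context is needed around individual VAE calls.
pipe = StableDiffusionPipeline.from_pretrained(sd_model_id, torch_dtype=torch.float16).to(device)

# Illustrative input: a preprocessed image batch in [-1, 1] (assumed shape).
x0 = torch.randn(1, 3, 512, 512, dtype=torch.float16, device=device)

with torch.inference_mode():
    # Encode to latents and apply the SD scaling factor, as in invert() above.
    w0 = (pipe.vae.encode(x0).latent_dist.mode() * 0.18215).float()
    # Decode back; cast to fp16 to match the half-precision VAE weights.
    x0_dec = pipe.vae.decode(1 / 0.18215 * w0.half()).sample

Compared with keeping fp32 weights and autocasting per call, resident fp16 weights roughly halve the GPU memory needed to hold sd_pipe and sem_pipe on the same device.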