amazonaws-la committed
Commit 5e11dd4
1 Parent(s): a1bc4f0

Update app.py

Files changed (1)
  1. app.py +6 -10
app.py CHANGED
@@ -7,12 +7,10 @@ import random
 
 import gradio as gr
 import numpy as np
+import PIL.Image
 import spaces
-import requests
 import torch
-import PIL.Image
-from io import BytesIO
-from diffusers import StableDiffusionImg2ImgPipeline, AutoencoderKL, DiffusionPipeline
+from diffusers import AutoencoderKL, DiffusionPipeline
 
 DESCRIPTION = "# SDXL"
 if not torch.cuda.is_available():
@@ -58,12 +56,11 @@ def generate(
     vaecall = 'stabilityai/sd-vae-ft-mse',
     lora = 'amazonaws-la/juliette',
     lora_scale: float = 0.7,
-    url = "https://m.media-amazon.com/images/I/81zPcrN6m+L.jpg",
 ) -> PIL.Image.Image:
     if torch.cuda.is_available():
-
-        if not use_vae:
-            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model, torch_dtype=torch.float16)
+
+        if not use_vae:
+            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
 
         if use_vae:
             vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
@@ -72,7 +69,7 @@ def generate(
         if use_lora:
             pipe.load_lora_weights(lora)
             pipe.fuse_lora(lora_scale=0.7)
-
+
         if ENABLE_CPU_OFFLOAD:
             pipe.enable_model_cpu_offload()
 
@@ -102,7 +99,6 @@ def generate(
             guidance_scale=guidance_scale_base,
             num_inference_steps=num_inference_steps_base,
             generator=generator,
-            image=url,
             output_type="pil",
         ).images[0]
     else:
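
With the URL download path removed (no more requests, BytesIO, or image=url), generate() after this commit is plain text-to-image through DiffusionPipeline, optionally with a custom VAE and a fused LoRA. The sketch below is a minimal standalone reconstruction of that path, not the app's exact code: the checkpoint id, prompt, seed, and sampler settings are illustrative assumptions, and attaching the VAE via the vae= keyword is also an assumption, since the diff only shows the AutoencoderKL load.

# Minimal sketch of the post-commit text-to-image path (assumed values marked below).
import torch
from diffusers import AutoencoderKL, DiffusionPipeline

model = "stable-diffusion-v1-5/stable-diffusion-v1-5"  # assumed stand-in checkpoint
use_vae, use_lora = True, True

if torch.cuda.is_available():
    if not use_vae:
        pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
    else:
        # Custom VAE, matching the app's vaecall default; wiring it in via vae= is assumed.
        vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
        pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)

    if use_lora:
        pipe.load_lora_weights("amazonaws-la/juliette")  # the app's LoRA repo
        pipe.fuse_lora(lora_scale=0.7)                   # fixed scale, as in the diff

    pipe.enable_model_cpu_offload()  # the app gates this behind ENABLE_CPU_OFFLOAD

    image = pipe(
        prompt="a portrait photo, golden hour",  # illustrative prompt
        guidance_scale=5.0,                      # the app passes guidance_scale_base
        num_inference_steps=25,                  # the app passes num_inference_steps_base
        generator=torch.Generator().manual_seed(0),
        output_type="pil",
    ).images[0]
    image.save("out.png")

One side note on the diff itself: fuse_lora(lora_scale=0.7) bakes the adapter into the base weights at a hard-coded scale, so the lora_scale parameter in the signature has no effect unless it is threaded into that call.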