Warlord-K committed on
Commit
ccd4e3b
1 Parent(s): 5dab465

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -12,8 +12,8 @@ import torch
12
  from diffusers import AutoencoderKL, StableDiffusionXLPipeline
13
  import uuid
14
 
15
- DESCRIPTION = '''# Segmind Stable Diffusion: SSD-1B
16
- #### [Segmind's SSD-1B](https://huggingface.co/segmind/SSD-1B) is a distilled, 50% smaller version of SDXL, offering up to 60% speedup
17
  '''
18
  if not torch.cuda.is_available():
19
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
@@ -95,7 +95,7 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
95
  if torch.cuda.is_available():
96
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
97
  pipe = StableDiffusionXLPipeline.from_pretrained(
98
- "segmind/SSD-1B",
99
  vae=vae,
100
  torch_dtype=torch.float16,
101
  use_safetensors=True,
 
12
  from diffusers import AutoencoderKL, StableDiffusionXLPipeline
13
  import uuid
14
 
15
+ DESCRIPTION = '''# Segmind-Vega
16
+ #### [Segmind-Vega](https://huggingface.co/segmind/Segmind-Vega) is a distilled, 70% smaller version of SDXL, offering up to 100% speedup
17
  '''
18
  if not torch.cuda.is_available():
19
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
95
  if torch.cuda.is_available():
96
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
97
  pipe = StableDiffusionXLPipeline.from_pretrained(
98
+ "segmind/Segmind-Vega",
99
  vae=vae,
100
  torch_dtype=torch.float16,
101
  use_safetensors=True,