Text-to-Image
Diffusers
stable-diffusion
PeterL1n committed
Commit d2ebdfc · 1 Parent(s): b489940

Update readme

Files changed (1)
1. README.md +9 -7

README.md CHANGED
@@ -25,7 +25,7 @@ Please always use the correct checkpoint for the corresponding inference steps.
 
  ```python
  import torch
- from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
+ from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
  from huggingface_hub import hf_hub_download
 
  base = "stabilityai/stable-diffusion-xl-base-1.0"
@@ -33,8 +33,9 @@ repo = "ByteDance/SDXL-Lightning"
  ckpt = "sdxl_lightning_4step_unet.pth" # Use the correct ckpt for your step setting!
 
  # Load model.
- pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
- pipe.unet.load_state_dict(torch.load(hf_hub_download(repo, ckpt), map_location="cuda"))
+ unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
+ unet.load_state_dict(torch.load(hf_hub_download(repo, ckpt), map_location="cuda"))
+ pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
 
  # Ensure sampler uses "trailing" timesteps.
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
@@ -72,7 +73,7 @@ The 1-step model uses "sample" prediction instead of "epsilon" prediction! The s
 
  ```python
  import torch
- from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
+ from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
  from huggingface_hub import hf_hub_download
 
  base = "stabilityai/stable-diffusion-xl-base-1.0"
@@ -80,8 +81,9 @@ repo = "ByteDance/SDXL-Lightning"
  ckpt = "sdxl_lightning_1step_unet_x0.pth" # Use the correct ckpt for your step setting!
 
  # Load model.
- pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
- pipe.unet.load_state_dict(torch.load(hf_hub_download(repo, ckpt), map_location="cuda"))
+ unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
+ unet.load_state_dict(torch.load(hf_hub_download(repo, ckpt), map_location="cuda"))
+ pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")
 
  # Ensure sampler uses "trailing" timesteps and "sample" prediction type.
  pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample")
@@ -112,4 +114,4 @@ Please use Euler sampler with sgm_uniform scheduler.
 
  ### 1-Step UNet
 
- ComfyUI does not support changing model formulation to x0-prediction, so it is not usable in ComfyUI yet. Hopefully ComfyUI gets updated soon.
+ ComfyUI does not support changing the model formulation to x0-prediction, so it is not usable in ComfyUI yet. Hopefully, ComfyUI will get updated soon.
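For reference, this is how the 4-step loading flow reads after the commit, assembled into one runnable snippet. The change builds the UNet from the base config and loads the Lightning weights into it before constructing the pipeline, so `from_pretrained` reuses the passed component instead of loading the base UNet. Only the final generation call is added here for illustration; the prompt, step count, guidance scale, and output filename are assumptions and not part of the diff.

```python
import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download

base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"
ckpt = "sdxl_lightning_4step_unet.pth"  # Use the correct ckpt for your step setting!

# Build the UNet from the base config, then load the Lightning weights into it.
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
unet.load_state_dict(torch.load(hf_hub_download(repo, ckpt), map_location="cuda"))

# Passing unet= makes the pipeline use this component rather than the base UNet.
pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cuda")

# Ensure the sampler uses "trailing" timesteps.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

# Illustrative generation call (prompt, steps, guidance scale, and filename are assumptions).
pipe("A girl smiling", num_inference_steps=4, guidance_scale=0).images[0].save("output.png")
```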