Diffusers Bot committed
Commit 119c56f
1 Parent(s): 10bb986

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. v0.26.3/README.md +0 -0
  2. v0.26.3/bit_diffusion.py +264 -0
  3. v0.26.3/checkpoint_merger.py +282 -0
  4. v0.26.3/clip_guided_images_mixing_stable_diffusion.py +455 -0
  5. v0.26.3/clip_guided_stable_diffusion.py +347 -0
  6. v0.26.3/clip_guided_stable_diffusion_img2img.py +493 -0
  7. v0.26.3/composable_stable_diffusion.py +582 -0
  8. v0.26.3/ddim_noise_comparative_analysis.py +190 -0
  9. v0.26.3/dps_pipeline.py +466 -0
  10. v0.26.3/edict_pipeline.py +264 -0
  11. v0.26.3/gluegen.py +865 -0
  12. v0.26.3/iadb.py +149 -0
  13. v0.26.3/imagic_stable_diffusion.py +496 -0
  14. v0.26.3/img2img_inpainting.py +464 -0
  15. v0.26.3/instaflow_one_step.py +707 -0
  16. v0.26.3/interpolate_stable_diffusion.py +525 -0
  17. v0.26.3/ip_adapter_face_id.py +1525 -0
  18. v0.26.3/latent_consistency_img2img.py +827 -0
  19. v0.26.3/latent_consistency_interpolate.py +1051 -0
  20. v0.26.3/latent_consistency_txt2img.py +728 -0
  21. v0.26.3/llm_grounded_diffusion.py +1613 -0
  22. v0.26.3/lpw_stable_diffusion.py +1471 -0
  23. v0.26.3/lpw_stable_diffusion_onnx.py +1148 -0
  24. v0.26.3/lpw_stable_diffusion_xl.py +0 -0
  25. v0.26.3/magic_mix.py +152 -0
  26. v0.26.3/marigold_depth_estimation.py +601 -0
  27. v0.26.3/masked_stable_diffusion_img2img.py +262 -0
  28. v0.26.3/mixture_canvas.py +501 -0
  29. v0.26.3/mixture_tiling.py +405 -0
  30. v0.26.3/multilingual_stable_diffusion.py +437 -0
  31. v0.26.3/one_step_unet.py +24 -0
  32. v0.26.3/pipeline_animatediff_controlnet.py +1130 -0
  33. v0.26.3/pipeline_animatediff_img2video.py +989 -0
  34. v0.26.3/pipeline_demofusion_sdxl.py +1414 -0
  35. v0.26.3/pipeline_fabric.py +751 -0
  36. v0.26.3/pipeline_null_text_inversion.py +260 -0
  37. v0.26.3/pipeline_prompt2prompt.py +861 -0
  38. v0.26.3/pipeline_sdxl_style_aligned.py +0 -0
  39. v0.26.3/pipeline_stable_diffusion_upscale_ldm3d.py +772 -0
  40. v0.26.3/pipeline_stable_diffusion_xl_controlnet_adapter.py +1463 -0
  41. v0.26.3/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +1908 -0
  42. v0.26.3/pipeline_stable_diffusion_xl_instantid.py +1058 -0
  43. v0.26.3/pipeline_zero1to3.py +893 -0
  44. v0.26.3/regional_prompting_stable_diffusion.py +620 -0
  45. v0.26.3/rerender_a_video.py +1178 -0
  46. v0.26.3/run_onnx_controlnet.py +911 -0
  47. v0.26.3/run_tensorrt_controlnet.py +1022 -0
  48. v0.26.3/scheduling_ufogen.py +525 -0
  49. v0.26.3/sd_text2img_k_diffusion.py +476 -0
  50. v0.26.3/sde_drag.py +594 -0
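
The files listed above are the diffusers v0.26.3 community pipelines; each module defines a DiffusionPipeline subclass that diffusers can load by file name through the custom_pipeline argument. A minimal usage sketch (not part of this commit; the model ids below are illustrative) for the CLIP-guided pipeline included in this folder:

import torch
from transformers import CLIPImageProcessor, CLIPModel
from diffusers import DiffusionPipeline

# Illustrative model choices -- any CLIP checkpoint and Stable Diffusion base should work in principle.
clip_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
feature_extractor = CLIPImageProcessor.from_pretrained(clip_id)
clip_model = CLIPModel.from_pretrained(clip_id, torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="clip_guided_stable_diffusion",  # resolves to clip_guided_stable_diffusion.py
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

image = pipe("a watercolor painting of a lighthouse", clip_guidance_scale=100).images[0]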
v0.26.3/README.md ADDED
The diff for this file is too large to render. See raw diff
 
v0.26.3/bit_diffusion.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Optional, Tuple, Union
2
+
3
+ import torch
4
+ from einops import rearrange, reduce
5
+
6
+ from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
7
+ from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
8
+ from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
9
+
10
+
11
+ BITS = 8
12
+
13
+
14
+ # convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
15
+ def decimal_to_bits(x, bits=BITS):
16
+ """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
17
+ device = x.device
18
+
19
+ x = (x * 255).int().clamp(0, 255)
20
+
21
+ mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
22
+ mask = rearrange(mask, "d -> d 1 1")
23
+ x = rearrange(x, "b c h w -> b c 1 h w")
24
+
25
+ bits = ((x & mask) != 0).float()
26
+ bits = rearrange(bits, "b c d h w -> b (c d) h w")
27
+ bits = bits * 2 - 1
28
+ return bits
29
+
30
+
31
+ def bits_to_decimal(x, bits=BITS):
32
+ """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
33
+ device = x.device
34
+
35
+ x = (x > 0).int()
36
+ mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
37
+
38
+ mask = rearrange(mask, "d -> d 1 1")
39
+ x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
40
+ dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
41
+ return (dec / 255).clamp(0.0, 1.0)
42
+
43
+
44
+ # modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
45
+ def ddim_bit_scheduler_step(
46
+ self,
47
+ model_output: torch.FloatTensor,
48
+ timestep: int,
49
+ sample: torch.FloatTensor,
50
+ eta: float = 0.0,
51
+ use_clipped_model_output: bool = True,
52
+ generator=None,
53
+ return_dict: bool = True,
54
+ ) -> Union[DDIMSchedulerOutput, Tuple]:
55
+ """
56
+ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
57
+ process from the learned model outputs (most often the predicted noise).
58
+ Args:
59
+ model_output (`torch.FloatTensor`): direct output from learned diffusion model.
60
+ timestep (`int`): current discrete timestep in the diffusion chain.
61
+ sample (`torch.FloatTensor`):
62
+ current instance of sample being created by diffusion process.
63
+ eta (`float`): weight of noise for added noise in diffusion step.
64
+ use_clipped_model_output (`bool`): TODO
65
+ generator: random number generator.
66
+ return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
67
+ Returns:
68
+ [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
69
+ [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
70
+ returning a tuple, the first element is the sample tensor.
71
+ """
72
+ if self.num_inference_steps is None:
73
+ raise ValueError(
74
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
75
+ )
76
+
77
+ # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
78
+ # Ideally, read the DDIM paper for an in-detail understanding
79
+
80
+ # Notation (<variable name> -> <name in paper>)
81
+ # - pred_noise_t -> e_theta(x_t, t)
82
+ # - pred_original_sample -> f_theta(x_t, t) or x_0
83
+ # - std_dev_t -> sigma_t
84
+ # - eta -> η
85
+ # - pred_sample_direction -> "direction pointing to x_t"
86
+ # - pred_prev_sample -> "x_t-1"
87
+
88
+ # 1. get previous step value (=t-1)
89
+ prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
90
+
91
+ # 2. compute alphas, betas
92
+ alpha_prod_t = self.alphas_cumprod[timestep]
93
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
94
+
95
+ beta_prod_t = 1 - alpha_prod_t
96
+
97
+ # 3. compute predicted original sample from predicted noise also called
98
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
99
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
100
+
101
+ # 4. Clip "predicted x_0"
102
+ scale = self.bit_scale
103
+ if self.config.clip_sample:
104
+ pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
105
+
106
+ # 5. compute variance: "sigma_t(η)" -> see formula (16)
107
+ # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
108
+ variance = self._get_variance(timestep, prev_timestep)
109
+ std_dev_t = eta * variance ** (0.5)
110
+
111
+ if use_clipped_model_output:
112
+ # the model_output is always re-derived from the clipped x_0 in Glide
113
+ model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
114
+
115
+ # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
116
+ pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
117
+
118
+ # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
119
+ prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
120
+
121
+ if eta > 0:
122
+ # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
123
+ device = model_output.device if torch.is_tensor(model_output) else "cpu"
124
+ noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
125
+ variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise
126
+
127
+ prev_sample = prev_sample + variance
128
+
129
+ if not return_dict:
130
+ return (prev_sample,)
131
+
132
+ return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
133
+
134
+
135
+ def ddpm_bit_scheduler_step(
136
+ self,
137
+ model_output: torch.FloatTensor,
138
+ timestep: int,
139
+ sample: torch.FloatTensor,
140
+ prediction_type="epsilon",
141
+ generator=None,
142
+ return_dict: bool = True,
143
+ ) -> Union[DDPMSchedulerOutput, Tuple]:
144
+ """
145
+ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
146
+ process from the learned model outputs (most often the predicted noise).
147
+ Args:
148
+ model_output (`torch.FloatTensor`): direct output from learned diffusion model.
149
+ timestep (`int`): current discrete timestep in the diffusion chain.
150
+ sample (`torch.FloatTensor`):
151
+ current instance of sample being created by diffusion process.
152
+ prediction_type (`str`, default `epsilon`):
153
+ indicates whether the model predicts the noise (epsilon), or the samples (`sample`).
154
+ generator: random number generator.
155
+ return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
156
+ Returns:
157
+ [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
158
+ [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
159
+ returning a tuple, the first element is the sample tensor.
160
+ """
161
+ t = timestep
162
+
163
+ if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
164
+ model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
165
+ else:
166
+ predicted_variance = None
167
+
168
+ # 1. compute alphas, betas
169
+ alpha_prod_t = self.alphas_cumprod[t]
170
+ alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
171
+ beta_prod_t = 1 - alpha_prod_t
172
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
173
+
174
+ # 2. compute predicted original sample from predicted noise also called
175
+ # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
176
+ if prediction_type == "epsilon":
177
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
178
+ elif prediction_type == "sample":
179
+ pred_original_sample = model_output
180
+ else:
181
+ raise ValueError(f"Unsupported prediction_type {prediction_type}.")
182
+
183
+ # 3. Clip "predicted x_0"
184
+ scale = self.bit_scale
185
+ if self.config.clip_sample:
186
+ pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
187
+
188
+ # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
189
+ # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
190
+ pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
191
+ current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
192
+
193
+ # 5. Compute predicted previous sample µ_t
194
+ # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
195
+ pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
196
+
197
+ # 6. Add noise
198
+ variance = 0
199
+ if t > 0:
200
+ noise = torch.randn(
201
+ model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
202
+ ).to(model_output.device)
203
+ variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
204
+
205
+ pred_prev_sample = pred_prev_sample + variance
206
+
207
+ if not return_dict:
208
+ return (pred_prev_sample,)
209
+
210
+ return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
211
+
212
+
213
+ class BitDiffusion(DiffusionPipeline):
214
+ def __init__(
215
+ self,
216
+ unet: UNet2DConditionModel,
217
+ scheduler: Union[DDIMScheduler, DDPMScheduler],
218
+ bit_scale: Optional[float] = 1.0,
219
+ ):
220
+ super().__init__()
221
+ self.bit_scale = bit_scale
222
+ self.scheduler.step = (
223
+ ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
224
+ )
225
+
226
+ self.register_modules(unet=unet, scheduler=scheduler)
227
+
228
+ @torch.no_grad()
229
+ def __call__(
230
+ self,
231
+ height: Optional[int] = 256,
232
+ width: Optional[int] = 256,
233
+ num_inference_steps: Optional[int] = 50,
234
+ generator: Optional[torch.Generator] = None,
235
+ batch_size: Optional[int] = 1,
236
+ output_type: Optional[str] = "pil",
237
+ return_dict: bool = True,
238
+ **kwargs,
239
+ ) -> Union[Tuple, ImagePipelineOutput]:
240
+ latents = torch.randn(
241
+ (batch_size, self.unet.config.in_channels, height, width),
242
+ generator=generator,
243
+ )
244
+ latents = decimal_to_bits(latents) * self.bit_scale
245
+ latents = latents.to(self.device)
246
+
247
+ self.scheduler.set_timesteps(num_inference_steps)
248
+
249
+ for t in self.progress_bar(self.scheduler.timesteps):
250
+ # predict the noise residual
251
+ noise_pred = self.unet(latents, t).sample
252
+
253
+ # compute the previous noisy sample x_t -> x_t-1
254
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
255
+
256
+ image = bits_to_decimal(latents)
257
+
258
+ if output_type == "pil":
259
+ image = self.numpy_to_pil(image)
260
+
261
+ if not return_dict:
262
+ return (image,)
263
+
264
+ return ImagePipelineOutput(images=image)
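
A quick round-trip sketch of the bit encoding defined above (assuming decimal_to_bits and bits_to_decimal from this file are in scope): an image in [0, 1] becomes a {-1, +1} tensor with eight times as many channels, and decoding recovers the 8-bit quantized image.

import torch

image = torch.rand(1, 3, 64, 64)           # values in [0, 1]

bits = decimal_to_bits(image)              # shape (1, 3 * 8, 64, 64), values in {-1.0, +1.0}
assert bits.shape == (1, 24, 64, 64)
assert set(bits.unique().tolist()) <= {-1.0, 1.0}

recovered = bits_to_decimal(bits)          # back to [0, 1], quantized to 256 levels
expected = (image * 255).int().float() / 255
assert torch.allclose(recovered, expected)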
v0.26.3/checkpoint_merger.py ADDED
@@ -0,0 +1,282 @@
1
+ import glob
2
+ import os
3
+ from typing import Dict, List, Union
4
+
5
+ import safetensors.torch
6
+ import torch
7
+ from huggingface_hub import snapshot_download
8
+ from huggingface_hub.utils import validate_hf_hub_args
9
+
10
+ from diffusers import DiffusionPipeline, __version__
11
+ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
12
+ from diffusers.utils import CONFIG_NAME, ONNX_WEIGHTS_NAME, WEIGHTS_NAME
13
+
14
+
15
+ class CheckpointMergerPipeline(DiffusionPipeline):
16
+ """
17
+ A class that supports merging diffusion models based on the discussion here:
18
+ https://github.com/huggingface/diffusers/issues/877
19
+
20
+ Example usage:
21
+
22
+ pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")
23
+
24
+ merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)
25
+
26
+ merged_pipe.to('cuda')
27
+
28
+ prompt = "An astronaut riding a unicycle on Mars"
29
+
30
+ results = merged_pipe(prompt)
31
+
32
+ ## For more details, see the docstring for the merge method.
33
+
34
+ """
35
+
36
+ def __init__(self):
37
+ self.register_to_config()
38
+ super().__init__()
39
+
40
+ def _compare_model_configs(self, dict0, dict1):
41
+ if dict0 == dict1:
42
+ return True
43
+ else:
44
+ config0, meta_keys0 = self._remove_meta_keys(dict0)
45
+ config1, meta_keys1 = self._remove_meta_keys(dict1)
46
+ if config0 == config1:
47
+ print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
48
+ return True
49
+ return False
50
+
51
+ def _remove_meta_keys(self, config_dict: Dict):
52
+ meta_keys = []
53
+ temp_dict = config_dict.copy()
54
+ for key in config_dict.keys():
55
+ if key.startswith("_"):
56
+ temp_dict.pop(key)
57
+ meta_keys.append(key)
58
+ return (temp_dict, meta_keys)
59
+
60
+ @torch.no_grad()
61
+ @validate_hf_hub_args
62
+ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
63
+ """
64
+ Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
65
+ in the argument 'pretrained_model_name_or_path_list' as a list.
66
+
67
+ Parameters:
68
+ -----------
69
+ pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.
70
+
71
+ **kwargs:
72
+ Supports all the default DiffusionPipeline.get_config_dict kwargs viz..
73
+
74
+ cache_dir, resume_download, force_download, proxies, local_files_only, token, revision, torch_dtype, device_map.
75
+
76
+ alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
77
+ would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
78
+
79
+ interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
80
+ Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported.
81
+
82
+ force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
83
+
84
+ """
85
+ # Default kwargs from DiffusionPipeline
86
+ cache_dir = kwargs.pop("cache_dir", None)
87
+ resume_download = kwargs.pop("resume_download", False)
88
+ force_download = kwargs.pop("force_download", False)
89
+ proxies = kwargs.pop("proxies", None)
90
+ local_files_only = kwargs.pop("local_files_only", False)
91
+ token = kwargs.pop("token", None)
92
+ revision = kwargs.pop("revision", None)
93
+ torch_dtype = kwargs.pop("torch_dtype", None)
94
+ device_map = kwargs.pop("device_map", None)
95
+
96
+ alpha = kwargs.pop("alpha", 0.5)
97
+ interp = kwargs.pop("interp", None)
98
+
99
+ print("Received list", pretrained_model_name_or_path_list)
100
+ print(f"Combining with alpha={alpha}, interpolation mode={interp}")
101
+
102
+ checkpoint_count = len(pretrained_model_name_or_path_list)
103
+ # Ignore result from model_index.json comparison of the two checkpoints
104
+ force = kwargs.pop("force", False)
105
+
106
+ # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
107
+ if checkpoint_count > 3 or checkpoint_count < 2:
108
+ raise ValueError(
109
+ "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
110
+ " passed."
111
+ )
112
+
113
+ print("Received the right number of checkpoints")
114
+ # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
115
+ # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None
116
+
117
+ # Validate that the checkpoints can be merged
118
+ # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
119
+ config_dicts = []
120
+ for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
121
+ config_dict = DiffusionPipeline.load_config(
122
+ pretrained_model_name_or_path,
123
+ cache_dir=cache_dir,
124
+ resume_download=resume_download,
125
+ force_download=force_download,
126
+ proxies=proxies,
127
+ local_files_only=local_files_only,
128
+ token=token,
129
+ revision=revision,
130
+ )
131
+ config_dicts.append(config_dict)
132
+
133
+ comparison_result = True
134
+ for idx in range(1, len(config_dicts)):
135
+ comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
136
+ if not force and comparison_result is False:
137
+ raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
138
+ print(config_dicts[0], config_dicts[1])
139
+ print("Compatible model_index.json files found")
140
+ # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
141
+ cached_folders = []
142
+ for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts):
143
+ folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
144
+ allow_patterns = [os.path.join(k, "*") for k in folder_names]
145
+ allow_patterns += [
146
+ WEIGHTS_NAME,
147
+ SCHEDULER_CONFIG_NAME,
148
+ CONFIG_NAME,
149
+ ONNX_WEIGHTS_NAME,
150
+ DiffusionPipeline.config_name,
151
+ ]
152
+ requested_pipeline_class = config_dict.get("_class_name")
153
+ user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}
154
+
155
+ cached_folder = (
156
+ pretrained_model_name_or_path
157
+ if os.path.isdir(pretrained_model_name_or_path)
158
+ else snapshot_download(
159
+ pretrained_model_name_or_path,
160
+ cache_dir=cache_dir,
161
+ resume_download=resume_download,
162
+ proxies=proxies,
163
+ local_files_only=local_files_only,
164
+ token=token,
165
+ revision=revision,
166
+ allow_patterns=allow_patterns,
167
+ user_agent=user_agent,
168
+ )
169
+ )
170
+ print("Cached Folder", cached_folder)
171
+ cached_folders.append(cached_folder)
172
+
173
+ # Step 3:
174
+ # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
175
+ final_pipe = DiffusionPipeline.from_pretrained(
176
+ cached_folders[0], torch_dtype=torch_dtype, device_map=device_map
177
+ )
178
+ final_pipe.to(self.device)
179
+
180
+ checkpoint_path_2 = None
181
+ if len(cached_folders) > 2:
182
+ checkpoint_path_2 = os.path.join(cached_folders[2])
183
+
184
+ if interp == "sigmoid":
185
+ theta_func = CheckpointMergerPipeline.sigmoid
186
+ elif interp == "inv_sigmoid":
187
+ theta_func = CheckpointMergerPipeline.inv_sigmoid
188
+ elif interp == "add_diff":
189
+ theta_func = CheckpointMergerPipeline.add_difference
190
+ else:
191
+ theta_func = CheckpointMergerPipeline.weighted_sum
192
+
193
+ # Find each module's state dict.
194
+ for attr in final_pipe.config.keys():
195
+ if not attr.startswith("_"):
196
+ checkpoint_path_1 = os.path.join(cached_folders[1], attr)
197
+ if os.path.exists(checkpoint_path_1):
198
+ files = [
199
+ *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
200
+ *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
201
+ ]
202
+ checkpoint_path_1 = files[0] if len(files) > 0 else None
203
+ if len(cached_folders) < 3:
204
+ checkpoint_path_2 = None
205
+ else:
206
+ checkpoint_path_2 = os.path.join(cached_folders[2], attr)
207
+ if os.path.exists(checkpoint_path_2):
208
+ files = [
209
+ *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
210
+ *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
211
+ ]
212
+ checkpoint_path_2 = files[0] if len(files) > 0 else None
213
+ # For an attr if both checkpoint_path_1 and 2 are None, ignore.
214
+ # If at least one is present, deal with it according to the interp method, but only if the state_dict keys match.
215
+ if checkpoint_path_1 is None and checkpoint_path_2 is None:
216
+ print(f"Skipping {attr}: not present in 2nd or 3rd model")
217
+ continue
218
+ try:
219
+ module = getattr(final_pipe, attr)
220
+ if isinstance(module, bool): # ignore requires_safety_checker boolean
221
+ continue
222
+ theta_0 = getattr(module, "state_dict")
223
+ theta_0 = theta_0()
224
+
225
+ update_theta_0 = getattr(module, "load_state_dict")
226
+ theta_1 = (
227
+ safetensors.torch.load_file(checkpoint_path_1)
228
+ if (checkpoint_path_1.endswith(".safetensors"))
229
+ else torch.load(checkpoint_path_1, map_location="cpu")
230
+ )
231
+ theta_2 = None
232
+ if checkpoint_path_2:
233
+ theta_2 = (
234
+ safetensors.torch.load_file(checkpoint_path_2)
235
+ if (checkpoint_path_2.endswith(".safetensors"))
236
+ else torch.load(checkpoint_path_2, map_location="cpu")
237
+ )
238
+
239
+ if not theta_0.keys() == theta_1.keys():
240
+ print(f"Skipping {attr}: key mismatch")
241
+ continue
242
+ if theta_2 and not theta_1.keys() == theta_2.keys():
243
+ print(f"Skipping {attr}: key mismatch")
+ continue
244
+ except Exception as e:
245
+ print(f"Skipping {attr} due to an unexpected error: {str(e)}")
246
+ continue
247
+ print(f"MERGING {attr}")
248
+
249
+ for key in theta_0.keys():
250
+ if theta_2:
251
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
252
+ else:
253
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)
254
+
255
+ del theta_1
256
+ del theta_2
257
+ update_theta_0(theta_0)
258
+
259
+ del theta_0
260
+ return final_pipe
261
+
262
+ @staticmethod
263
+ def weighted_sum(theta0, theta1, theta2, alpha):
264
+ return ((1 - alpha) * theta0) + (alpha * theta1)
265
+
266
+ # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
267
+ @staticmethod
268
+ def sigmoid(theta0, theta1, theta2, alpha):
269
+ alpha = alpha * alpha * (3 - (2 * alpha))
270
+ return theta0 + ((theta1 - theta0) * alpha)
271
+
272
+ # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
273
+ @staticmethod
274
+ def inv_sigmoid(theta0, theta1, theta2, alpha):
275
+ import math
276
+
277
+ alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
278
+ return theta0 + ((theta1 - theta0) * alpha)
279
+
280
+ @staticmethod
281
+ def add_difference(theta0, theta1, theta2, alpha):
282
+ return theta0 + (theta1 - theta2) * (1.0 - alpha)
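
The merge() docstring above describes alpha and the interp modes in prose; here is a tiny numeric sketch (assuming CheckpointMergerPipeline from this file is in scope) of the default weighted-sum mode and the three-checkpoint add_diff mode:

import torch

theta0 = torch.tensor([1.0, 2.0])   # a parameter tensor from the first checkpoint
theta1 = torch.tensor([3.0, 4.0])   # the same parameter from the second checkpoint
theta2 = torch.tensor([0.5, 1.0])   # the same parameter from a third (base) checkpoint
alpha = 0.5

# interp=None -> weighted sum: (1 - alpha) * theta0 + alpha * theta1
print(CheckpointMergerPipeline.weighted_sum(theta0, theta1, None, alpha))      # tensor([2., 3.])

# interp="add_diff" -> theta0 + (theta1 - theta2) * (1 - alpha)
print(CheckpointMergerPipeline.add_difference(theta0, theta1, theta2, alpha))  # tensor([2.2500, 3.5000])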
v0.26.3/clip_guided_images_mixing_stable_diffusion.py ADDED
@@ -0,0 +1,455 @@
1
+ # -*- coding: utf-8 -*-
2
+ import inspect
3
+ from typing import Optional, Union
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from torch.nn import functional as F
9
+ from torchvision import transforms
10
+ from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
11
+
12
+ from diffusers import (
13
+ AutoencoderKL,
14
+ DDIMScheduler,
15
+ DiffusionPipeline,
16
+ DPMSolverMultistepScheduler,
17
+ LMSDiscreteScheduler,
18
+ PNDMScheduler,
19
+ UNet2DConditionModel,
20
+ )
21
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
22
+ from diffusers.utils import PIL_INTERPOLATION
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ def preprocess(image, w, h):
27
+ if isinstance(image, torch.Tensor):
28
+ return image
29
+ elif isinstance(image, PIL.Image.Image):
30
+ image = [image]
31
+
32
+ if isinstance(image[0], PIL.Image.Image):
33
+ image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
34
+ image = np.concatenate(image, axis=0)
35
+ image = np.array(image).astype(np.float32) / 255.0
36
+ image = image.transpose(0, 3, 1, 2)
37
+ image = 2.0 * image - 1.0
38
+ image = torch.from_numpy(image)
39
+ elif isinstance(image[0], torch.Tensor):
40
+ image = torch.cat(image, dim=0)
41
+ return image
42
+
43
+
44
+ def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
45
+ if not isinstance(v0, np.ndarray):
46
+ inputs_are_torch = True
47
+ input_device = v0.device
48
+ v0 = v0.cpu().numpy()
49
+ v1 = v1.cpu().numpy()
50
+
51
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
52
+ if np.abs(dot) > DOT_THRESHOLD:
53
+ v2 = (1 - t) * v0 + t * v1
54
+ else:
55
+ theta_0 = np.arccos(dot)
56
+ sin_theta_0 = np.sin(theta_0)
57
+ theta_t = theta_0 * t
58
+ sin_theta_t = np.sin(theta_t)
59
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
60
+ s1 = sin_theta_t / sin_theta_0
61
+ v2 = s0 * v0 + s1 * v1
62
+
63
+ if inputs_are_torch:
64
+ v2 = torch.from_numpy(v2).to(input_device)
65
+
66
+ return v2
67
+
68
+
69
+ def spherical_dist_loss(x, y):
70
+ x = F.normalize(x, dim=-1)
71
+ y = F.normalize(y, dim=-1)
72
+ return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
73
+
74
+
75
+ def set_requires_grad(model, value):
76
+ for param in model.parameters():
77
+ param.requires_grad = value
78
+
79
+
80
+ class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
81
+ def __init__(
82
+ self,
83
+ vae: AutoencoderKL,
84
+ text_encoder: CLIPTextModel,
85
+ clip_model: CLIPModel,
86
+ tokenizer: CLIPTokenizer,
87
+ unet: UNet2DConditionModel,
88
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
89
+ feature_extractor: CLIPFeatureExtractor,
90
+ coca_model=None,
91
+ coca_tokenizer=None,
92
+ coca_transform=None,
93
+ ):
94
+ super().__init__()
95
+ self.register_modules(
96
+ vae=vae,
97
+ text_encoder=text_encoder,
98
+ clip_model=clip_model,
99
+ tokenizer=tokenizer,
100
+ unet=unet,
101
+ scheduler=scheduler,
102
+ feature_extractor=feature_extractor,
103
+ coca_model=coca_model,
104
+ coca_tokenizer=coca_tokenizer,
105
+ coca_transform=coca_transform,
106
+ )
107
+ self.feature_extractor_size = (
108
+ feature_extractor.size
109
+ if isinstance(feature_extractor.size, int)
110
+ else feature_extractor.size["shortest_edge"]
111
+ )
112
+ self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
113
+ set_requires_grad(self.text_encoder, False)
114
+ set_requires_grad(self.clip_model, False)
115
+
116
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
117
+ if slice_size == "auto":
118
+ # half the attention head size is usually a good trade-off between
119
+ # speed and memory
120
+ slice_size = self.unet.config.attention_head_dim // 2
121
+ self.unet.set_attention_slice(slice_size)
122
+
123
+ def disable_attention_slicing(self):
124
+ self.enable_attention_slicing(None)
125
+
126
+ def freeze_vae(self):
127
+ set_requires_grad(self.vae, False)
128
+
129
+ def unfreeze_vae(self):
130
+ set_requires_grad(self.vae, True)
131
+
132
+ def freeze_unet(self):
133
+ set_requires_grad(self.unet, False)
134
+
135
+ def unfreeze_unet(self):
136
+ set_requires_grad(self.unet, True)
137
+
138
+ def get_timesteps(self, num_inference_steps, strength, device):
139
+ # get the original timestep using init_timestep
140
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
141
+
142
+ t_start = max(num_inference_steps - init_timestep, 0)
143
+ timesteps = self.scheduler.timesteps[t_start:]
144
+
145
+ return timesteps, num_inference_steps - t_start
146
+
147
+ def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
148
+ if not isinstance(image, torch.Tensor):
149
+ raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
150
+
151
+ image = image.to(device=device, dtype=dtype)
152
+
153
+ if isinstance(generator, list):
154
+ init_latents = [
155
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
156
+ ]
157
+ init_latents = torch.cat(init_latents, dim=0)
158
+ else:
159
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
160
+
161
+ # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
162
+ init_latents = 0.18215 * init_latents
163
+ init_latents = init_latents.repeat_interleave(batch_size, dim=0)
164
+
165
+ noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
166
+
167
+ # get latents
168
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
169
+ latents = init_latents
170
+
171
+ return latents
172
+
173
+ def get_image_description(self, image):
174
+ transformed_image = self.coca_transform(image).unsqueeze(0)
175
+ with torch.no_grad(), torch.cuda.amp.autocast():
176
+ generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
177
+ generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
178
+ return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
179
+
180
+ def get_clip_image_embeddings(self, image, batch_size):
181
+ clip_image_input = self.feature_extractor.preprocess(image)
182
+ clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
183
+ image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
184
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
185
+ image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
186
+ return image_embeddings_clip
187
+
188
+ @torch.enable_grad()
189
+ def cond_fn(
190
+ self,
191
+ latents,
192
+ timestep,
193
+ index,
194
+ text_embeddings,
195
+ noise_pred_original,
196
+ original_image_embeddings_clip,
197
+ clip_guidance_scale,
198
+ ):
199
+ latents = latents.detach().requires_grad_()
200
+
201
+ latent_model_input = self.scheduler.scale_model_input(latents, timestep)
202
+
203
+ # predict the noise residual
204
+ noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
205
+
206
+ if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
207
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
208
+ beta_prod_t = 1 - alpha_prod_t
209
+ # compute predicted original sample from predicted noise also called
210
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
211
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
212
+
213
+ fac = torch.sqrt(beta_prod_t)
214
+ sample = pred_original_sample * (fac) + latents * (1 - fac)
215
+ elif isinstance(self.scheduler, LMSDiscreteScheduler):
216
+ sigma = self.scheduler.sigmas[index]
217
+ sample = latents - sigma * noise_pred
218
+ else:
219
+ raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
220
+
221
+ # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
222
+ sample = 1 / 0.18215 * sample
223
+ image = self.vae.decode(sample).sample
224
+ image = (image / 2 + 0.5).clamp(0, 1)
225
+
226
+ image = transforms.Resize(self.feature_extractor_size)(image)
227
+ image = self.normalize(image).to(latents.dtype)
228
+
229
+ image_embeddings_clip = self.clip_model.get_image_features(image)
230
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
231
+
232
+ loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
233
+
234
+ grads = -torch.autograd.grad(loss, latents)[0]
235
+
236
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
237
+ latents = latents.detach() + grads * (sigma**2)
238
+ noise_pred = noise_pred_original
239
+ else:
240
+ noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
241
+ return noise_pred, latents
242
+
243
+ @torch.no_grad()
244
+ def __call__(
245
+ self,
246
+ style_image: Union[torch.FloatTensor, PIL.Image.Image],
247
+ content_image: Union[torch.FloatTensor, PIL.Image.Image],
248
+ style_prompt: Optional[str] = None,
249
+ content_prompt: Optional[str] = None,
250
+ height: Optional[int] = 512,
251
+ width: Optional[int] = 512,
252
+ noise_strength: float = 0.6,
253
+ num_inference_steps: Optional[int] = 50,
254
+ guidance_scale: Optional[float] = 7.5,
255
+ batch_size: Optional[int] = 1,
256
+ eta: float = 0.0,
257
+ clip_guidance_scale: Optional[float] = 100,
258
+ generator: Optional[torch.Generator] = None,
259
+ output_type: Optional[str] = "pil",
260
+ return_dict: bool = True,
261
+ slerp_latent_style_strength: float = 0.8,
262
+ slerp_prompt_style_strength: float = 0.1,
263
+ slerp_clip_image_style_strength: float = 0.1,
264
+ ):
265
+ if isinstance(generator, list) and len(generator) != batch_size:
266
+ raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
267
+
268
+ if height % 8 != 0 or width % 8 != 0:
269
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
270
+
271
+ if isinstance(generator, torch.Generator) and batch_size > 1:
272
+ generator = [generator] + [None] * (batch_size - 1)
273
+
274
+ coca_is_none = [
275
+ ("model", self.coca_model is None),
276
+ ("tokenizer", self.coca_tokenizer is None),
277
+ ("transform", self.coca_transform is None),
278
+ ]
279
+ coca_is_none = [x[0] for x in coca_is_none if x[1]]
280
+ coca_is_none_str = ", ".join(coca_is_none)
281
+ # generate prompts with coca model if prompt is None
282
+ if content_prompt is None:
283
+ if len(coca_is_none):
284
+ raise ValueError(
285
+ f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
286
+ f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
287
+ )
288
+ content_prompt = self.get_image_description(content_image)
289
+ if style_prompt is None:
290
+ if len(coca_is_none):
291
+ raise ValueError(
292
+ f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
293
+ f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
294
+ )
295
+ style_prompt = self.get_image_description(style_image)
296
+
297
+ # get prompt text embeddings for content and style
298
+ content_text_input = self.tokenizer(
299
+ content_prompt,
300
+ padding="max_length",
301
+ max_length=self.tokenizer.model_max_length,
302
+ truncation=True,
303
+ return_tensors="pt",
304
+ )
305
+ content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
306
+
307
+ style_text_input = self.tokenizer(
308
+ style_prompt,
309
+ padding="max_length",
310
+ max_length=self.tokenizer.model_max_length,
311
+ truncation=True,
312
+ return_tensors="pt",
313
+ )
314
+ style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
315
+
316
+ text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
317
+
318
+ # duplicate text embeddings for each generation per prompt
319
+ text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
320
+
321
+ # set timesteps
322
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
323
+ extra_set_kwargs = {}
324
+ if accepts_offset:
325
+ extra_set_kwargs["offset"] = 1
326
+
327
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
328
+ # Some schedulers like PNDM have timesteps as arrays
329
+ # It's more optimized to move all timesteps to correct device beforehand
330
+ self.scheduler.timesteps.to(self.device)
331
+
332
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
333
+ latent_timestep = timesteps[:1].repeat(batch_size)
334
+
335
+ # Preprocess image
336
+ preprocessed_content_image = preprocess(content_image, width, height)
337
+ content_latents = self.prepare_latents(
338
+ preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
339
+ )
340
+
341
+ preprocessed_style_image = preprocess(style_image, width, height)
342
+ style_latents = self.prepare_latents(
343
+ preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
344
+ )
345
+
346
+ latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
347
+
348
+ if clip_guidance_scale > 0:
349
+ content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
350
+ style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
351
+ clip_image_embeddings = slerp(
352
+ slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
353
+ )
354
+
355
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
356
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
357
+ # corresponds to doing no classifier free guidance.
358
+ do_classifier_free_guidance = guidance_scale > 1.0
359
+ # get unconditional embeddings for classifier free guidance
360
+ if do_classifier_free_guidance:
361
+ max_length = content_text_input.input_ids.shape[-1]
362
+ uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
363
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
364
+ # duplicate unconditional embeddings for each generation per prompt
365
+ uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
366
+
367
+ # For classifier free guidance, we need to do two forward passes.
368
+ # Here we concatenate the unconditional and text embeddings into a single batch
369
+ # to avoid doing two forward passes
370
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
371
+
372
+ # get the initial random noise unless the user supplied it
373
+
374
+ # Unlike in other pipelines, latents need to be generated in the target device
375
+ # for 1-to-1 results reproducibility with the CompVis implementation.
376
+ # However this currently doesn't work in `mps`.
377
+ latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
378
+ latents_dtype = text_embeddings.dtype
379
+ if latents is None:
380
+ if self.device.type == "mps":
381
+ # randn does not work reproducibly on mps
382
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
383
+ self.device
384
+ )
385
+ else:
386
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
387
+ else:
388
+ if latents.shape != latents_shape:
389
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
390
+ latents = latents.to(self.device)
391
+
392
+ # scale the initial noise by the standard deviation required by the scheduler
393
+ latents = latents * self.scheduler.init_noise_sigma
394
+
395
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
396
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
397
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
398
+ # and should be between [0, 1]
399
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
400
+ extra_step_kwargs = {}
401
+ if accepts_eta:
402
+ extra_step_kwargs["eta"] = eta
403
+
404
+ # check if the scheduler accepts generator
405
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
406
+ if accepts_generator:
407
+ extra_step_kwargs["generator"] = generator
408
+
409
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
410
+ for i, t in enumerate(timesteps):
411
+ # expand the latents if we are doing classifier free guidance
412
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
413
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
414
+
415
+ # predict the noise residual
416
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
417
+
418
+ # perform classifier free guidance
419
+ if do_classifier_free_guidance:
420
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
421
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
422
+
423
+ # perform clip guidance
424
+ if clip_guidance_scale > 0:
425
+ text_embeddings_for_guidance = (
426
+ text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
427
+ )
428
+ noise_pred, latents = self.cond_fn(
429
+ latents,
430
+ t,
431
+ i,
432
+ text_embeddings_for_guidance,
433
+ noise_pred,
434
+ clip_image_embeddings,
435
+ clip_guidance_scale,
436
+ )
437
+
438
+ # compute the previous noisy sample x_t -> x_t-1
439
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
440
+
441
+ progress_bar.update()
442
+ # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
443
+ latents = 1 / 0.18215 * latents
444
+ image = self.vae.decode(latents).sample
445
+
446
+ image = (image / 2 + 0.5).clamp(0, 1)
447
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
448
+
449
+ if output_type == "pil":
450
+ image = self.numpy_to_pil(image)
451
+
452
+ if not return_dict:
453
+ return (image, None)
454
+
455
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
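
This pipeline blends content and style three times over with slerp: once on the VAE latents, once on the prompt embeddings, and once on the CLIP image embeddings. A small sketch (assuming slerp from this file is in scope; printed values are approximate) of what the spherical interpolation does on unit vectors:

import torch

content = torch.tensor([1.0, 0.0])
style = torch.tensor([0.0, 1.0])

# t=0 returns the content vector, t=1 the style vector; intermediate values
# follow the arc between them, so the norm of unit inputs is preserved.
halfway = slerp(0.5, content, style)
print(halfway)         # ~tensor([0.7071, 0.7071])
print(halfway.norm())  # ~tensor(1.0000)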
v0.26.3/clip_guided_stable_diffusion.py ADDED
@@ -0,0 +1,347 @@
1
+ import inspect
2
+ from typing import List, Optional, Union
3
+
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import functional as F
7
+ from torchvision import transforms
8
+ from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
9
+
10
+ from diffusers import (
11
+ AutoencoderKL,
12
+ DDIMScheduler,
13
+ DiffusionPipeline,
14
+ DPMSolverMultistepScheduler,
15
+ LMSDiscreteScheduler,
16
+ PNDMScheduler,
17
+ UNet2DConditionModel,
18
+ )
19
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
20
+
21
+
22
+ class MakeCutouts(nn.Module):
23
+ def __init__(self, cut_size, cut_power=1.0):
24
+ super().__init__()
25
+
26
+ self.cut_size = cut_size
27
+ self.cut_power = cut_power
28
+
29
+ def forward(self, pixel_values, num_cutouts):
30
+ sideY, sideX = pixel_values.shape[2:4]
31
+ max_size = min(sideX, sideY)
32
+ min_size = min(sideX, sideY, self.cut_size)
33
+ cutouts = []
34
+ for _ in range(num_cutouts):
35
+ size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
36
+ offsetx = torch.randint(0, sideX - size + 1, ())
37
+ offsety = torch.randint(0, sideY - size + 1, ())
38
+ cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
39
+ cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
40
+ return torch.cat(cutouts)
41
+
42
+
43
+ def spherical_dist_loss(x, y):
44
+ x = F.normalize(x, dim=-1)
45
+ y = F.normalize(y, dim=-1)
46
+ return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
47
+
48
+
49
+ def set_requires_grad(model, value):
50
+ for param in model.parameters():
51
+ param.requires_grad = value
52
+
53
+
54
+ class CLIPGuidedStableDiffusion(DiffusionPipeline):
55
+ """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
56
+ - https://github.com/Jack000/glid-3-xl
57
+ - https://github.dev/crowsonkb/k-diffusion
58
+ """
59
+
60
+ def __init__(
61
+ self,
62
+ vae: AutoencoderKL,
63
+ text_encoder: CLIPTextModel,
64
+ clip_model: CLIPModel,
65
+ tokenizer: CLIPTokenizer,
66
+ unet: UNet2DConditionModel,
67
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
68
+ feature_extractor: CLIPImageProcessor,
69
+ ):
70
+ super().__init__()
71
+ self.register_modules(
72
+ vae=vae,
73
+ text_encoder=text_encoder,
74
+ clip_model=clip_model,
75
+ tokenizer=tokenizer,
76
+ unet=unet,
77
+ scheduler=scheduler,
78
+ feature_extractor=feature_extractor,
79
+ )
80
+
81
+ self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
82
+ self.cut_out_size = (
83
+ feature_extractor.size
84
+ if isinstance(feature_extractor.size, int)
85
+ else feature_extractor.size["shortest_edge"]
86
+ )
87
+ self.make_cutouts = MakeCutouts(self.cut_out_size)
88
+
89
+ set_requires_grad(self.text_encoder, False)
90
+ set_requires_grad(self.clip_model, False)
91
+
92
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
93
+ if slice_size == "auto":
94
+ # half the attention head size is usually a good trade-off between
95
+ # speed and memory
96
+ slice_size = self.unet.config.attention_head_dim // 2
97
+ self.unet.set_attention_slice(slice_size)
98
+
99
+ def disable_attention_slicing(self):
100
+ self.enable_attention_slicing(None)
101
+
102
+ def freeze_vae(self):
103
+ set_requires_grad(self.vae, False)
104
+
105
+ def unfreeze_vae(self):
106
+ set_requires_grad(self.vae, True)
107
+
108
+ def freeze_unet(self):
109
+ set_requires_grad(self.unet, False)
110
+
111
+ def unfreeze_unet(self):
112
+ set_requires_grad(self.unet, True)
113
+
114
+ @torch.enable_grad()
115
+ def cond_fn(
116
+ self,
117
+ latents,
118
+ timestep,
119
+ index,
120
+ text_embeddings,
121
+ noise_pred_original,
122
+ text_embeddings_clip,
123
+ clip_guidance_scale,
124
+ num_cutouts,
125
+ use_cutouts=True,
126
+ ):
127
+ latents = latents.detach().requires_grad_()
128
+
129
+ latent_model_input = self.scheduler.scale_model_input(latents, timestep)
130
+
131
+ # predict the noise residual
132
+ noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
133
+
134
+ if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
135
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
136
+ beta_prod_t = 1 - alpha_prod_t
137
+ # compute predicted original sample from predicted noise also called
138
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
139
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
140
+
141
+ fac = torch.sqrt(beta_prod_t)
142
+ sample = pred_original_sample * (fac) + latents * (1 - fac)
143
+ elif isinstance(self.scheduler, LMSDiscreteScheduler):
144
+ sigma = self.scheduler.sigmas[index]
145
+ sample = latents - sigma * noise_pred
146
+ else:
147
+ raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
148
+
149
+ sample = 1 / self.vae.config.scaling_factor * sample
150
+ image = self.vae.decode(sample).sample
151
+ image = (image / 2 + 0.5).clamp(0, 1)
152
+
153
+ if use_cutouts:
154
+ image = self.make_cutouts(image, num_cutouts)
155
+ else:
156
+ image = transforms.Resize(self.cut_out_size)(image)
157
+ image = self.normalize(image).to(latents.dtype)
158
+
159
+ image_embeddings_clip = self.clip_model.get_image_features(image)
160
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
161
+
162
+ if use_cutouts:
163
+ dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
164
+ dists = dists.view([num_cutouts, sample.shape[0], -1])
165
+ loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
166
+ else:
167
+ loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
168
+
169
+ grads = -torch.autograd.grad(loss, latents)[0]
170
+
171
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
172
+ latents = latents.detach() + grads * (sigma**2)
173
+ noise_pred = noise_pred_original
174
+ else:
175
+ noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
176
+ return noise_pred, latents
177
+
178
+ @torch.no_grad()
179
+ def __call__(
180
+ self,
181
+ prompt: Union[str, List[str]],
182
+ height: Optional[int] = 512,
183
+ width: Optional[int] = 512,
184
+ num_inference_steps: Optional[int] = 50,
185
+ guidance_scale: Optional[float] = 7.5,
186
+ num_images_per_prompt: Optional[int] = 1,
187
+ eta: float = 0.0,
188
+ clip_guidance_scale: Optional[float] = 100,
189
+ clip_prompt: Optional[Union[str, List[str]]] = None,
190
+ num_cutouts: Optional[int] = 4,
191
+ use_cutouts: Optional[bool] = True,
192
+ generator: Optional[torch.Generator] = None,
193
+ latents: Optional[torch.FloatTensor] = None,
194
+ output_type: Optional[str] = "pil",
195
+ return_dict: bool = True,
196
+ ):
197
+ if isinstance(prompt, str):
198
+ batch_size = 1
199
+ elif isinstance(prompt, list):
200
+ batch_size = len(prompt)
201
+ else:
202
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
203
+
204
+ if height % 8 != 0 or width % 8 != 0:
205
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
206
+
207
+ # get prompt text embeddings
208
+ text_input = self.tokenizer(
209
+ prompt,
210
+ padding="max_length",
211
+ max_length=self.tokenizer.model_max_length,
212
+ truncation=True,
213
+ return_tensors="pt",
214
+ )
215
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
216
+ # duplicate text embeddings for each generation per prompt
217
+ text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
218
+
219
+ if clip_guidance_scale > 0:
220
+ if clip_prompt is not None:
221
+ clip_text_input = self.tokenizer(
222
+ clip_prompt,
223
+ padding="max_length",
224
+ max_length=self.tokenizer.model_max_length,
225
+ truncation=True,
226
+ return_tensors="pt",
227
+ ).input_ids.to(self.device)
228
+ else:
229
+ clip_text_input = text_input.input_ids.to(self.device)
230
+ text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
231
+ text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
232
+ # duplicate text embeddings clip for each generation per prompt
233
+ text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
234
+
235
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
236
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
237
+ # corresponds to doing no classifier free guidance.
238
+ do_classifier_free_guidance = guidance_scale > 1.0
239
+ # get unconditional embeddings for classifier free guidance
240
+ if do_classifier_free_guidance:
241
+ max_length = text_input.input_ids.shape[-1]
242
+ uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
243
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
244
+ # duplicate unconditional embeddings for each generation per prompt
245
+ uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
246
+
247
+ # For classifier free guidance, we need to do two forward passes.
248
+ # Here we concatenate the unconditional and text embeddings into a single batch
249
+ # to avoid doing two forward passes
250
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
251
+
252
+ # get the initial random noise unless the user supplied it
253
+
254
+ # Unlike in other pipelines, latents need to be generated in the target device
255
+ # for 1-to-1 results reproducibility with the CompVis implementation.
256
+ # However this currently doesn't work in `mps`.
257
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
258
+ latents_dtype = text_embeddings.dtype
259
+ if latents is None:
260
+ if self.device.type == "mps":
261
+ # randn does not work reproducibly on mps
262
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
263
+ self.device
264
+ )
265
+ else:
266
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
267
+ else:
268
+ if latents.shape != latents_shape:
269
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
270
+ latents = latents.to(self.device)
271
+
272
+ # set timesteps
273
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
274
+ extra_set_kwargs = {}
275
+ if accepts_offset:
276
+ extra_set_kwargs["offset"] = 1
277
+
278
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
279
+
280
+ # Some schedulers like PNDM have timesteps as arrays
281
+ # It's more optimized to move all timesteps to correct device beforehand
282
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
283
+
284
+ # scale the initial noise by the standard deviation required by the scheduler
285
+ latents = latents * self.scheduler.init_noise_sigma
286
+
287
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
288
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
289
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
290
+ # and should be between [0, 1]
291
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
292
+ extra_step_kwargs = {}
293
+ if accepts_eta:
294
+ extra_step_kwargs["eta"] = eta
295
+
296
+ # check if the scheduler accepts generator
297
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
298
+ if accepts_generator:
299
+ extra_step_kwargs["generator"] = generator
300
+
301
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
302
+ # expand the latents if we are doing classifier free guidance
303
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
304
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
305
+
306
+ # predict the noise residual
307
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
308
+
309
+ # perform classifier free guidance
310
+ if do_classifier_free_guidance:
311
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
312
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
313
+
314
+ # perform clip guidance
315
+ if clip_guidance_scale > 0:
316
+ text_embeddings_for_guidance = (
317
+ text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
318
+ )
319
+ noise_pred, latents = self.cond_fn(
320
+ latents,
321
+ t,
322
+ i,
323
+ text_embeddings_for_guidance,
324
+ noise_pred,
325
+ text_embeddings_clip,
326
+ clip_guidance_scale,
327
+ num_cutouts,
328
+ use_cutouts,
329
+ )
330
+
331
+ # compute the previous noisy sample x_t -> x_t-1
332
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
333
+
334
+ # scale and decode the image latents with vae
335
+ latents = 1 / self.vae.config.scaling_factor * latents
336
+ image = self.vae.decode(latents).sample
337
+
338
+ image = (image / 2 + 0.5).clamp(0, 1)
339
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
340
+
341
+ if output_type == "pil":
342
+ image = self.numpy_to_pil(image)
343
+
344
+ if not return_dict:
345
+ return (image, None)
346
+
347
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
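
Note: a minimal usage sketch for the CLIP-guided pipeline ending above, assembled from the example docstring of the companion img2img file below; the community pipeline name `clip_guided_stable_diffusion` and the model ids are assumptions, so adjust them to your setup.

```python
# Sketch only: loads the community pipeline by name and runs CLIP-guided text-to-image.
import torch
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel

feature_extractor = CLIPFeatureExtractor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")
clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)

guided_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_stable_diffusion",  # assumed community pipeline name
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")
guided_pipeline.enable_attention_slicing()

image = guided_pipeline(
    prompt="a watercolor painting of a lighthouse at dawn",
    num_inference_steps=50,
    guidance_scale=7.5,
    clip_guidance_scale=100,
    num_cutouts=4,
    use_cutouts=False,
).images[0]
image.save("clip_guided.png")
```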
v0.26.3/clip_guided_stable_diffusion_img2img.py ADDED
@@ -0,0 +1,493 @@
1
+ import inspect
2
+ from typing import List, Optional, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+ from torchvision import transforms
10
+ from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
11
+
12
+ from diffusers import (
13
+ AutoencoderKL,
14
+ DDIMScheduler,
15
+ DiffusionPipeline,
16
+ DPMSolverMultistepScheduler,
17
+ LMSDiscreteScheduler,
18
+ PNDMScheduler,
19
+ UNet2DConditionModel,
20
+ )
21
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
22
+ from diffusers.utils import PIL_INTERPOLATION, deprecate
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ EXAMPLE_DOC_STRING = """
27
+ Examples:
28
+ ```
29
+ from io import BytesIO
30
+
31
+ import requests
32
+ import torch
33
+ from diffusers import DiffusionPipeline
34
+ from PIL import Image
35
+ from transformers import CLIPFeatureExtractor, CLIPModel
36
+
37
+ feature_extractor = CLIPFeatureExtractor.from_pretrained(
38
+ "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
39
+ )
40
+ clip_model = CLIPModel.from_pretrained(
41
+ "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
42
+ )
43
+
44
+
45
+ guided_pipeline = DiffusionPipeline.from_pretrained(
46
+ "CompVis/stable-diffusion-v1-4",
47
+ # custom_pipeline="clip_guided_stable_diffusion",
48
+ custom_pipeline="clip_guided_stable_diffusion_img2img",
49
+ clip_model=clip_model,
50
+ feature_extractor=feature_extractor,
51
+ torch_dtype=torch.float16,
52
+ )
53
+ guided_pipeline.enable_attention_slicing()
54
+ guided_pipeline = guided_pipeline.to("cuda")
55
+
56
+ prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
57
+
58
+ url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
59
+
60
+ response = requests.get(url)
61
+ init_image = Image.open(BytesIO(response.content)).convert("RGB")
62
+
63
+ image = guided_pipeline(
64
+ prompt=prompt,
65
+ num_inference_steps=30,
66
+ image=init_image,
67
+ strength=0.75,
68
+ guidance_scale=7.5,
69
+ clip_guidance_scale=100,
70
+ num_cutouts=4,
71
+ use_cutouts=False,
72
+ ).images[0]
73
+ display(image)
74
+ ```
75
+ """
76
+
77
+
78
+ def preprocess(image, w, h):
79
+ if isinstance(image, torch.Tensor):
80
+ return image
81
+ elif isinstance(image, PIL.Image.Image):
82
+ image = [image]
83
+
84
+ if isinstance(image[0], PIL.Image.Image):
85
+ image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
86
+ image = np.concatenate(image, axis=0)
87
+ image = np.array(image).astype(np.float32) / 255.0
88
+ image = image.transpose(0, 3, 1, 2)
89
+ image = 2.0 * image - 1.0
90
+ image = torch.from_numpy(image)
91
+ elif isinstance(image[0], torch.Tensor):
92
+ image = torch.cat(image, dim=0)
93
+ return image
94
+
95
+
96
+ class MakeCutouts(nn.Module):
97
+ def __init__(self, cut_size, cut_power=1.0):
98
+ super().__init__()
99
+
100
+ self.cut_size = cut_size
101
+ self.cut_power = cut_power
102
+
103
+ def forward(self, pixel_values, num_cutouts):
104
+ sideY, sideX = pixel_values.shape[2:4]
105
+ max_size = min(sideX, sideY)
106
+ min_size = min(sideX, sideY, self.cut_size)
107
+ cutouts = []
108
+ for _ in range(num_cutouts):
109
+ size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
110
+ offsetx = torch.randint(0, sideX - size + 1, ())
111
+ offsety = torch.randint(0, sideY - size + 1, ())
112
+ cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
113
+ cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
114
+ return torch.cat(cutouts)
115
+
116
+
117
+ def spherical_dist_loss(x, y):
118
+ x = F.normalize(x, dim=-1)
119
+ y = F.normalize(y, dim=-1)
120
+ return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
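
For reference, `spherical_dist_loss` above is the squared geodesic (great-circle) distance between the L2-normalized embeddings; a compact restatement of the chained tensor ops:

```latex
d(x, y) = 2\,\arcsin\!\left(\tfrac{1}{2}\,\lVert \hat{x} - \hat{y} \rVert_2\right)^{2},
\qquad \hat{x} = x / \lVert x \rVert_2,\quad \hat{y} = y / \lVert y \rVert_2
```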
121
+
122
+
123
+ def set_requires_grad(model, value):
124
+ for param in model.parameters():
125
+ param.requires_grad = value
126
+
127
+
128
+ class CLIPGuidedStableDiffusion(DiffusionPipeline):
129
+ """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
130
+ - https://github.com/Jack000/glid-3-xl
131
+ - https://github.dev/crowsonkb/k-diffusion
132
+ """
133
+
134
+ def __init__(
135
+ self,
136
+ vae: AutoencoderKL,
137
+ text_encoder: CLIPTextModel,
138
+ clip_model: CLIPModel,
139
+ tokenizer: CLIPTokenizer,
140
+ unet: UNet2DConditionModel,
141
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
142
+ feature_extractor: CLIPFeatureExtractor,
143
+ ):
144
+ super().__init__()
145
+ self.register_modules(
146
+ vae=vae,
147
+ text_encoder=text_encoder,
148
+ clip_model=clip_model,
149
+ tokenizer=tokenizer,
150
+ unet=unet,
151
+ scheduler=scheduler,
152
+ feature_extractor=feature_extractor,
153
+ )
154
+
155
+ self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
156
+ self.cut_out_size = (
157
+ feature_extractor.size
158
+ if isinstance(feature_extractor.size, int)
159
+ else feature_extractor.size["shortest_edge"]
160
+ )
161
+ self.make_cutouts = MakeCutouts(self.cut_out_size)
162
+
163
+ set_requires_grad(self.text_encoder, False)
164
+ set_requires_grad(self.clip_model, False)
165
+
166
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
167
+ if slice_size == "auto":
168
+ # half the attention head size is usually a good trade-off between
169
+ # speed and memory
170
+ slice_size = self.unet.config.attention_head_dim // 2
171
+ self.unet.set_attention_slice(slice_size)
172
+
173
+ def disable_attention_slicing(self):
174
+ self.enable_attention_slicing(None)
175
+
176
+ def freeze_vae(self):
177
+ set_requires_grad(self.vae, False)
178
+
179
+ def unfreeze_vae(self):
180
+ set_requires_grad(self.vae, True)
181
+
182
+ def freeze_unet(self):
183
+ set_requires_grad(self.unet, False)
184
+
185
+ def unfreeze_unet(self):
186
+ set_requires_grad(self.unet, True)
187
+
188
+ def get_timesteps(self, num_inference_steps, strength, device):
189
+ # get the original timestep using init_timestep
190
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
191
+
192
+ t_start = max(num_inference_steps - init_timestep, 0)
193
+ timesteps = self.scheduler.timesteps[t_start:]
194
+
195
+ return timesteps, num_inference_steps - t_start
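
The `strength`-to-steps mapping above is plain integer arithmetic; a small self-contained sketch of the same rule, with illustrative values:

```python
def steps_for_strength(num_inference_steps: int, strength: float) -> int:
    # Mirrors get_timesteps: only the last `init_timestep` entries of the schedule are run.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return num_inference_steps - t_start

assert steps_for_strength(50, 0.8) == 40   # 80% of the schedule is actually executed
assert steps_for_strength(30, 0.75) == 22  # int(30 * 0.75) = 22
assert steps_for_strength(50, 1.0) == 50   # strength 1.0 effectively ignores the init image
```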
196
+
197
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
198
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
199
+ raise ValueError(
200
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
201
+ )
202
+
203
+ image = image.to(device=device, dtype=dtype)
204
+
205
+ batch_size = batch_size * num_images_per_prompt
206
+ if isinstance(generator, list) and len(generator) != batch_size:
207
+ raise ValueError(
208
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
209
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
210
+ )
211
+
212
+ if isinstance(generator, list):
213
+ init_latents = [
214
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
215
+ ]
216
+ init_latents = torch.cat(init_latents, dim=0)
217
+ else:
218
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
219
+
220
+ init_latents = self.vae.config.scaling_factor * init_latents
221
+
222
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
223
+ # expand init_latents for batch_size
224
+ deprecation_message = (
225
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
226
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
227
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
228
+ " your script to pass as many initial images as text prompts to suppress this warning."
229
+ )
230
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
231
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
232
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
233
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
234
+ raise ValueError(
235
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
236
+ )
237
+ else:
238
+ init_latents = torch.cat([init_latents], dim=0)
239
+
240
+ shape = init_latents.shape
241
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
242
+
243
+ # get latents
244
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
245
+ latents = init_latents
246
+
247
+ return latents
248
+
249
+ @torch.enable_grad()
250
+ def cond_fn(
251
+ self,
252
+ latents,
253
+ timestep,
254
+ index,
255
+ text_embeddings,
256
+ noise_pred_original,
257
+ text_embeddings_clip,
258
+ clip_guidance_scale,
259
+ num_cutouts,
260
+ use_cutouts=True,
261
+ ):
262
+ latents = latents.detach().requires_grad_()
263
+
264
+ latent_model_input = self.scheduler.scale_model_input(latents, timestep)
265
+
266
+ # predict the noise residual
267
+ noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
268
+
269
+ if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
270
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
271
+ beta_prod_t = 1 - alpha_prod_t
272
+ # compute predicted original sample from predicted noise also called
273
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
274
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
275
+
276
+ fac = torch.sqrt(beta_prod_t)
277
+ sample = pred_original_sample * (fac) + latents * (1 - fac)
278
+ elif isinstance(self.scheduler, LMSDiscreteScheduler):
279
+ sigma = self.scheduler.sigmas[index]
280
+ sample = latents - sigma * noise_pred
281
+ else:
282
+ raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
283
+
284
+ sample = 1 / self.vae.config.scaling_factor * sample
285
+ image = self.vae.decode(sample).sample
286
+ image = (image / 2 + 0.5).clamp(0, 1)
287
+
288
+ if use_cutouts:
289
+ image = self.make_cutouts(image, num_cutouts)
290
+ else:
291
+ image = transforms.Resize(self.cut_out_size)(image)
292
+ image = self.normalize(image).to(latents.dtype)
293
+
294
+ image_embeddings_clip = self.clip_model.get_image_features(image)
295
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
296
+
297
+ if use_cutouts:
298
+ dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
299
+ dists = dists.view([num_cutouts, sample.shape[0], -1])
300
+ loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
301
+ else:
302
+ loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
303
+
304
+ grads = -torch.autograd.grad(loss, latents)[0]
305
+
306
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
307
+ latents = latents.detach() + grads * (sigma**2)
308
+ noise_pred = noise_pred_original
309
+ else:
310
+ noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
311
+ return noise_pred, latents
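
A compact restatement of the guidance step in `cond_fn` for the DDIM-style branch (the LMS branch applies the analogous sigma-scaled update to the latents instead):

```latex
\hat{x}_0 = \frac{x_t - \sqrt{1-\bar{\alpha}_t}\,\hat{\epsilon}_\theta(x_t, t)}{\sqrt{\bar{\alpha}_t}},
\qquad
g = -\nabla_{x_t}\Big[\lambda\, d_{\mathrm{sph}}\big(\mathrm{CLIP}_{\mathrm{img}},\, \mathrm{CLIP}_{\mathrm{txt}}\big)\Big],
\qquad
\tilde{\epsilon} = \hat{\epsilon}_\theta - \sqrt{1-\bar{\alpha}_t}\; g
```

Here λ is `clip_guidance_scale`, and the image fed to CLIP is decoded from the blend `fac * pred_original_sample + (1 - fac) * latents` with `fac = sqrt(1 - ᾱ_t)`, matching the code above.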
312
+
313
+ @torch.no_grad()
314
+ def __call__(
315
+ self,
316
+ prompt: Union[str, List[str]],
317
+ height: Optional[int] = 512,
318
+ width: Optional[int] = 512,
319
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
320
+ strength: float = 0.8,
321
+ num_inference_steps: Optional[int] = 50,
322
+ guidance_scale: Optional[float] = 7.5,
323
+ num_images_per_prompt: Optional[int] = 1,
324
+ eta: float = 0.0,
325
+ clip_guidance_scale: Optional[float] = 100,
326
+ clip_prompt: Optional[Union[str, List[str]]] = None,
327
+ num_cutouts: Optional[int] = 4,
328
+ use_cutouts: Optional[bool] = True,
329
+ generator: Optional[torch.Generator] = None,
330
+ latents: Optional[torch.FloatTensor] = None,
331
+ output_type: Optional[str] = "pil",
332
+ return_dict: bool = True,
333
+ ):
334
+ if isinstance(prompt, str):
335
+ batch_size = 1
336
+ elif isinstance(prompt, list):
337
+ batch_size = len(prompt)
338
+ else:
339
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
340
+
341
+ if height % 8 != 0 or width % 8 != 0:
342
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
343
+
344
+ # get prompt text embeddings
345
+ text_input = self.tokenizer(
346
+ prompt,
347
+ padding="max_length",
348
+ max_length=self.tokenizer.model_max_length,
349
+ truncation=True,
350
+ return_tensors="pt",
351
+ )
352
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
353
+ # duplicate text embeddings for each generation per prompt
354
+ text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
355
+
356
+ # set timesteps
357
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
358
+ extra_set_kwargs = {}
359
+ if accepts_offset:
360
+ extra_set_kwargs["offset"] = 1
361
+
362
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
363
+ # Some schedulers like PNDM have timesteps as arrays
364
+ # It's more optimized to move all timesteps to correct device beforehand
365
+ self.scheduler.timesteps.to(self.device)
366
+
367
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
368
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
369
+
370
+ # Preprocess image
371
+ image = preprocess(image, width, height)
372
+ latents = self.prepare_latents(
373
+ image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, self.device, generator
374
+ )
375
+
376
+ if clip_guidance_scale > 0:
377
+ if clip_prompt is not None:
378
+ clip_text_input = self.tokenizer(
379
+ clip_prompt,
380
+ padding="max_length",
381
+ max_length=self.tokenizer.model_max_length,
382
+ truncation=True,
383
+ return_tensors="pt",
384
+ ).input_ids.to(self.device)
385
+ else:
386
+ clip_text_input = text_input.input_ids.to(self.device)
387
+ text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
388
+ text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
389
+ # duplicate text embeddings clip for each generation per prompt
390
+ text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
391
+
392
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
393
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
394
+ # corresponds to doing no classifier free guidance.
395
+ do_classifier_free_guidance = guidance_scale > 1.0
396
+ # get unconditional embeddings for classifier free guidance
397
+ if do_classifier_free_guidance:
398
+ max_length = text_input.input_ids.shape[-1]
399
+ uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
400
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
401
+ # duplicate unconditional embeddings for each generation per prompt
402
+ uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
403
+
404
+ # For classifier free guidance, we need to do two forward passes.
405
+ # Here we concatenate the unconditional and text embeddings into a single batch
406
+ # to avoid doing two forward passes
407
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
408
+
409
+ # get the initial random noise unless the user supplied it
410
+
411
+ # Unlike in other pipelines, latents need to be generated in the target device
412
+ # for 1-to-1 results reproducibility with the CompVis implementation.
413
+ # However this currently doesn't work in `mps`.
414
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
415
+ latents_dtype = text_embeddings.dtype
416
+ if latents is None:
417
+ if self.device.type == "mps":
418
+ # randn does not work reproducibly on mps
419
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
420
+ self.device
421
+ )
422
+ else:
423
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
424
+ else:
425
+ if latents.shape != latents_shape:
426
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
427
+ latents = latents.to(self.device)
428
+
429
+ # scale the initial noise by the standard deviation required by the scheduler
430
+ latents = latents * self.scheduler.init_noise_sigma
431
+
432
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
433
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
434
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
435
+ # and should be between [0, 1]
436
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
437
+ extra_step_kwargs = {}
438
+ if accepts_eta:
439
+ extra_step_kwargs["eta"] = eta
440
+
441
+ # check if the scheduler accepts generator
442
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
443
+ if accepts_generator:
444
+ extra_step_kwargs["generator"] = generator
445
+
446
+ with self.progress_bar(total=num_inference_steps):
447
+ for i, t in enumerate(timesteps):
448
+ # expand the latents if we are doing classifier free guidance
449
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
450
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
451
+
452
+ # predict the noise residual
453
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
454
+
455
+ # perform classifier free guidance
456
+ if do_classifier_free_guidance:
457
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
458
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
459
+
460
+ # perform clip guidance
461
+ if clip_guidance_scale > 0:
462
+ text_embeddings_for_guidance = (
463
+ text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
464
+ )
465
+ noise_pred, latents = self.cond_fn(
466
+ latents,
467
+ t,
468
+ i,
469
+ text_embeddings_for_guidance,
470
+ noise_pred,
471
+ text_embeddings_clip,
472
+ clip_guidance_scale,
473
+ num_cutouts,
474
+ use_cutouts,
475
+ )
476
+
477
+ # compute the previous noisy sample x_t -> x_t-1
478
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
479
+
480
+ # scale and decode the image latents with vae
481
+ latents = 1 / self.vae.config.scaling_factor * latents
482
+ image = self.vae.decode(latents).sample
483
+
484
+ image = (image / 2 + 0.5).clamp(0, 1)
485
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
486
+
487
+ if output_type == "pil":
488
+ image = self.numpy_to_pil(image)
489
+
490
+ if not return_dict:
491
+ return (image, None)
492
+
493
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
v0.26.3/composable_stable_diffusion.py ADDED
@@ -0,0 +1,582 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Callable, List, Optional, Union
17
+
18
+ import torch
19
+ from packaging import version
20
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
21
+
22
+ from diffusers import DiffusionPipeline
23
+ from diffusers.configuration_utils import FrozenDict
24
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
25
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
26
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
27
+ from diffusers.schedulers import (
28
+ DDIMScheduler,
29
+ DPMSolverMultistepScheduler,
30
+ EulerAncestralDiscreteScheduler,
31
+ EulerDiscreteScheduler,
32
+ LMSDiscreteScheduler,
33
+ PNDMScheduler,
34
+ )
35
+ from diffusers.utils import deprecate, is_accelerate_available, logging
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
+ class ComposableStableDiffusionPipeline(DiffusionPipeline):
42
+ r"""
43
+ Pipeline for text-to-image generation using Stable Diffusion.
44
+
45
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
46
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
47
+
48
+ Args:
49
+ vae ([`AutoencoderKL`]):
50
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
51
+ text_encoder ([`CLIPTextModel`]):
52
+ Frozen text-encoder. Stable Diffusion uses the text portion of
53
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
54
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
55
+ tokenizer (`CLIPTokenizer`):
56
+ Tokenizer of class
57
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
58
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
59
+ scheduler ([`SchedulerMixin`]):
60
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
61
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
62
+ safety_checker ([`StableDiffusionSafetyChecker`]):
63
+ Classification module that estimates whether generated images could be considered offensive or harmful.
64
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
65
+ feature_extractor ([`CLIPImageProcessor`]):
66
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
67
+ """
68
+
69
+ _optional_components = ["safety_checker", "feature_extractor"]
70
+
71
+ def __init__(
72
+ self,
73
+ vae: AutoencoderKL,
74
+ text_encoder: CLIPTextModel,
75
+ tokenizer: CLIPTokenizer,
76
+ unet: UNet2DConditionModel,
77
+ scheduler: Union[
78
+ DDIMScheduler,
79
+ PNDMScheduler,
80
+ LMSDiscreteScheduler,
81
+ EulerDiscreteScheduler,
82
+ EulerAncestralDiscreteScheduler,
83
+ DPMSolverMultistepScheduler,
84
+ ],
85
+ safety_checker: StableDiffusionSafetyChecker,
86
+ feature_extractor: CLIPImageProcessor,
87
+ requires_safety_checker: bool = True,
88
+ ):
89
+ super().__init__()
90
+
91
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
92
+ deprecation_message = (
93
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
94
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
95
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
96
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
97
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
98
+ " file"
99
+ )
100
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
101
+ new_config = dict(scheduler.config)
102
+ new_config["steps_offset"] = 1
103
+ scheduler._internal_dict = FrozenDict(new_config)
104
+
105
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
106
+ deprecation_message = (
107
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
108
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
109
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
110
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
111
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
112
+ )
113
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
114
+ new_config = dict(scheduler.config)
115
+ new_config["clip_sample"] = False
116
+ scheduler._internal_dict = FrozenDict(new_config)
117
+
118
+ if safety_checker is None and requires_safety_checker:
119
+ logger.warning(
120
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
121
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
122
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
123
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
124
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
125
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
126
+ )
127
+
128
+ if safety_checker is not None and feature_extractor is None:
129
+ raise ValueError(
130
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
131
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
132
+ )
133
+
134
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
135
+ version.parse(unet.config._diffusers_version).base_version
136
+ ) < version.parse("0.9.0.dev0")
137
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
138
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
139
+ deprecation_message = (
140
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
141
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
142
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
143
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
144
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
145
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
146
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
147
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
148
+ " the `unet/config.json` file"
149
+ )
150
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
151
+ new_config = dict(unet.config)
152
+ new_config["sample_size"] = 64
153
+ unet._internal_dict = FrozenDict(new_config)
154
+
155
+ self.register_modules(
156
+ vae=vae,
157
+ text_encoder=text_encoder,
158
+ tokenizer=tokenizer,
159
+ unet=unet,
160
+ scheduler=scheduler,
161
+ safety_checker=safety_checker,
162
+ feature_extractor=feature_extractor,
163
+ )
164
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
165
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
166
+
167
+ def enable_vae_slicing(self):
168
+ r"""
169
+ Enable sliced VAE decoding.
170
+
171
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
172
+ steps. This is useful to save some memory and allow larger batch sizes.
173
+ """
174
+ self.vae.enable_slicing()
175
+
176
+ def disable_vae_slicing(self):
177
+ r"""
178
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
179
+ computing decoding in one step.
180
+ """
181
+ self.vae.disable_slicing()
182
+
183
+ def enable_sequential_cpu_offload(self, gpu_id=0):
184
+ r"""
185
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
186
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
187
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
188
+ """
189
+ if is_accelerate_available():
190
+ from accelerate import cpu_offload
191
+ else:
192
+ raise ImportError("Please install accelerate via `pip install accelerate`")
193
+
194
+ device = torch.device(f"cuda:{gpu_id}")
195
+
196
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
197
+ if cpu_offloaded_model is not None:
198
+ cpu_offload(cpu_offloaded_model, device)
199
+
200
+ if self.safety_checker is not None:
201
+ # TODO(Patrick) - there is currently a bug with cpu offload of nn.Parameter in accelerate
202
+ # fix by only offloading self.safety_checker for now
203
+ cpu_offload(self.safety_checker.vision_model, device)
204
+
205
+ @property
206
+ def _execution_device(self):
207
+ r"""
208
+ Returns the device on which the pipeline's models will be executed. After calling
209
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
210
+ hooks.
211
+ """
212
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
213
+ return self.device
214
+ for module in self.unet.modules():
215
+ if (
216
+ hasattr(module, "_hf_hook")
217
+ and hasattr(module._hf_hook, "execution_device")
218
+ and module._hf_hook.execution_device is not None
219
+ ):
220
+ return torch.device(module._hf_hook.execution_device)
221
+ return self.device
222
+
223
+ def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
224
+ r"""
225
+ Encodes the prompt into text encoder hidden states.
226
+
227
+ Args:
228
+ prompt (`str` or `list(int)`):
229
+ prompt to be encoded
230
+ device: (`torch.device`):
231
+ torch device
232
+ num_images_per_prompt (`int`):
233
+ number of images that should be generated per prompt
234
+ do_classifier_free_guidance (`bool`):
235
+ whether to use classifier free guidance or not
236
+ negative_prompt (`str` or `List[str]`):
237
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
238
+ if `guidance_scale` is less than `1`).
239
+ """
240
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
241
+
242
+ text_inputs = self.tokenizer(
243
+ prompt,
244
+ padding="max_length",
245
+ max_length=self.tokenizer.model_max_length,
246
+ truncation=True,
247
+ return_tensors="pt",
248
+ )
249
+ text_input_ids = text_inputs.input_ids
250
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
251
+
252
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
253
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
254
+ logger.warning(
255
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
256
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
257
+ )
258
+
259
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
260
+ attention_mask = text_inputs.attention_mask.to(device)
261
+ else:
262
+ attention_mask = None
263
+
264
+ text_embeddings = self.text_encoder(
265
+ text_input_ids.to(device),
266
+ attention_mask=attention_mask,
267
+ )
268
+ text_embeddings = text_embeddings[0]
269
+
270
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
271
+ bs_embed, seq_len, _ = text_embeddings.shape
272
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
273
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
274
+
275
+ # get unconditional embeddings for classifier free guidance
276
+ if do_classifier_free_guidance:
277
+ uncond_tokens: List[str]
278
+ if negative_prompt is None:
279
+ uncond_tokens = [""] * batch_size
280
+ elif type(prompt) is not type(negative_prompt):
281
+ raise TypeError(
282
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
283
+ f" {type(prompt)}."
284
+ )
285
+ elif isinstance(negative_prompt, str):
286
+ uncond_tokens = [negative_prompt]
287
+ elif batch_size != len(negative_prompt):
288
+ raise ValueError(
289
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
290
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
291
+ " the batch size of `prompt`."
292
+ )
293
+ else:
294
+ uncond_tokens = negative_prompt
295
+
296
+ max_length = text_input_ids.shape[-1]
297
+ uncond_input = self.tokenizer(
298
+ uncond_tokens,
299
+ padding="max_length",
300
+ max_length=max_length,
301
+ truncation=True,
302
+ return_tensors="pt",
303
+ )
304
+
305
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
306
+ attention_mask = uncond_input.attention_mask.to(device)
307
+ else:
308
+ attention_mask = None
309
+
310
+ uncond_embeddings = self.text_encoder(
311
+ uncond_input.input_ids.to(device),
312
+ attention_mask=attention_mask,
313
+ )
314
+ uncond_embeddings = uncond_embeddings[0]
315
+
316
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
317
+ seq_len = uncond_embeddings.shape[1]
318
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
319
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
320
+
321
+ # For classifier free guidance, we need to do two forward passes.
322
+ # Here we concatenate the unconditional and text embeddings into a single batch
323
+ # to avoid doing two forward passes
324
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
325
+
326
+ return text_embeddings
327
+
328
+ def run_safety_checker(self, image, device, dtype):
329
+ if self.safety_checker is not None:
330
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
331
+ image, has_nsfw_concept = self.safety_checker(
332
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
333
+ )
334
+ else:
335
+ has_nsfw_concept = None
336
+ return image, has_nsfw_concept
337
+
338
+ def decode_latents(self, latents):
339
+ latents = 1 / 0.18215 * latents
340
+ image = self.vae.decode(latents).sample
341
+ image = (image / 2 + 0.5).clamp(0, 1)
342
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
343
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
344
+ return image
345
+
346
+ def prepare_extra_step_kwargs(self, generator, eta):
347
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
348
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
349
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
350
+ # and should be between [0, 1]
351
+
352
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
353
+ extra_step_kwargs = {}
354
+ if accepts_eta:
355
+ extra_step_kwargs["eta"] = eta
356
+
357
+ # check if the scheduler accepts generator
358
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
359
+ if accepts_generator:
360
+ extra_step_kwargs["generator"] = generator
361
+ return extra_step_kwargs
362
+
363
+ def check_inputs(self, prompt, height, width, callback_steps):
364
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
365
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
366
+
367
+ if height % 8 != 0 or width % 8 != 0:
368
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
369
+
370
+ if (callback_steps is None) or (
371
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
372
+ ):
373
+ raise ValueError(
374
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
375
+ f" {type(callback_steps)}."
376
+ )
377
+
378
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
379
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
380
+ if latents is None:
381
+ if device.type == "mps":
382
+ # randn does not work reproducibly on mps
383
+ latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
384
+ else:
385
+ latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
386
+ else:
387
+ if latents.shape != shape:
388
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
389
+ latents = latents.to(device)
390
+
391
+ # scale the initial noise by the standard deviation required by the scheduler
392
+ latents = latents * self.scheduler.init_noise_sigma
393
+ return latents
394
+
395
+ @torch.no_grad()
396
+ def __call__(
397
+ self,
398
+ prompt: Union[str, List[str]],
399
+ height: Optional[int] = None,
400
+ width: Optional[int] = None,
401
+ num_inference_steps: int = 50,
402
+ guidance_scale: float = 7.5,
403
+ negative_prompt: Optional[Union[str, List[str]]] = None,
404
+ num_images_per_prompt: Optional[int] = 1,
405
+ eta: float = 0.0,
406
+ generator: Optional[torch.Generator] = None,
407
+ latents: Optional[torch.FloatTensor] = None,
408
+ output_type: Optional[str] = "pil",
409
+ return_dict: bool = True,
410
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
411
+ callback_steps: int = 1,
412
+ weights: Optional[str] = "",
413
+ ):
414
+ r"""
415
+ Function invoked when calling the pipeline for generation.
416
+
417
+ Args:
418
+ prompt (`str` or `List[str]`):
419
+ The prompt or prompts to guide the image generation.
420
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
421
+ The height in pixels of the generated image.
422
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
423
+ The width in pixels of the generated image.
424
+ num_inference_steps (`int`, *optional*, defaults to 50):
425
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
426
+ expense of slower inference.
427
+ guidance_scale (`float`, *optional*, defaults to 7.5):
428
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
429
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
430
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
431
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
432
+ usually at the expense of lower image quality.
433
+ negative_prompt (`str` or `List[str]`, *optional*):
434
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
435
+ if `guidance_scale` is less than `1`).
436
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
437
+ The number of images to generate per prompt.
438
+ eta (`float`, *optional*, defaults to 0.0):
439
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
440
+ [`schedulers.DDIMScheduler`], will be ignored for others.
441
+ generator (`torch.Generator`, *optional*):
442
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
443
+ deterministic.
444
+ latents (`torch.FloatTensor`, *optional*):
445
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
446
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
447
+ tensor will be generated by sampling using the supplied random `generator`.
448
+ output_type (`str`, *optional*, defaults to `"pil"`):
449
+ The output format of the generate image. Choose between
450
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
451
+ return_dict (`bool`, *optional*, defaults to `True`):
452
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
453
+ plain tuple.
454
+ callback (`Callable`, *optional*):
455
+ A function that will be called every `callback_steps` steps during inference. The function will be
456
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
457
+ callback_steps (`int`, *optional*, defaults to 1):
458
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
459
+ called at every step.
460
+
461
+ Returns:
462
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
463
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
464
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
465
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
466
+ (nsfw) content, according to the `safety_checker`.
467
+ """
468
+ # 0. Default height and width to unet
469
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
470
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
471
+
472
+ # 1. Check inputs. Raise error if not correct
473
+ self.check_inputs(prompt, height, width, callback_steps)
474
+
475
+ # 2. Define call parameters
476
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
477
+ device = self._execution_device
478
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
479
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
480
+ # corresponds to doing no classifier free guidance.
481
+ do_classifier_free_guidance = guidance_scale > 1.0
482
+
483
+ if "|" in prompt:
484
+ prompt = [x.strip() for x in prompt.split("|")]
485
+ print(f"composing {prompt}...")
486
+
487
+ if not weights:
488
+ # specify weights for prompts (excluding the unconditional score)
489
+ print("using equal positive weights (conjunction) for all prompts...")
490
+ weights = torch.tensor([guidance_scale] * len(prompt), device=self.device).reshape(-1, 1, 1, 1)
491
+ else:
492
+ # set prompt weight for each
493
+ num_prompts = len(prompt) if isinstance(prompt, list) else 1
494
+ weights = [float(w.strip()) for w in weights.split("|")]
495
+ # guidance scale as the default
496
+ if len(weights) < num_prompts:
497
+ weights.append(guidance_scale)
498
+ else:
499
+ weights = weights[:num_prompts]
500
+ assert len(weights) == len(prompt), "weights specified are not equal to the number of prompts"
501
+ weights = torch.tensor(weights, device=self.device).reshape(-1, 1, 1, 1)
502
+ else:
503
+ weights = guidance_scale
504
+
505
+ # 3. Encode input prompt
506
+ text_embeddings = self._encode_prompt(
507
+ prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
508
+ )
509
+
510
+ # 4. Prepare timesteps
511
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
512
+ timesteps = self.scheduler.timesteps
513
+
514
+ # 5. Prepare latent variables
515
+ num_channels_latents = self.unet.config.in_channels
516
+ latents = self.prepare_latents(
517
+ batch_size * num_images_per_prompt,
518
+ num_channels_latents,
519
+ height,
520
+ width,
521
+ text_embeddings.dtype,
522
+ device,
523
+ generator,
524
+ latents,
525
+ )
526
+
527
+ # composable diffusion
528
+ if isinstance(prompt, list) and batch_size == 1:
529
+ # remove extra unconditional embedding
530
+ # N = one unconditional embed + conditional embeds
531
+ text_embeddings = text_embeddings[len(prompt) - 1 :]
532
+
533
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
534
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
535
+
536
+ # 7. Denoising loop
537
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
538
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
539
+ for i, t in enumerate(timesteps):
540
+ # expand the latents if we are doing classifier free guidance
541
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
542
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
543
+
544
+ # predict the noise residual
545
+ noise_pred = []
546
+ for j in range(text_embeddings.shape[0]):
547
+ noise_pred.append(
548
+ self.unet(latent_model_input[:1], t, encoder_hidden_states=text_embeddings[j : j + 1]).sample
549
+ )
550
+ noise_pred = torch.cat(noise_pred, dim=0)
551
+
552
+ # perform guidance
553
+ if do_classifier_free_guidance:
554
+ noise_pred_uncond, noise_pred_text = noise_pred[:1], noise_pred[1:]
555
+ noise_pred = noise_pred_uncond + (weights * (noise_pred_text - noise_pred_uncond)).sum(
556
+ dim=0, keepdims=True
557
+ )
558
+
559
+ # compute the previous noisy sample x_t -> x_t-1
560
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
561
+
562
+ # call the callback, if provided
563
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
564
+ progress_bar.update()
565
+ if callback is not None and i % callback_steps == 0:
566
+ step_idx = i // getattr(self.scheduler, "order", 1)
567
+ callback(step_idx, t, latents)
568
+
569
+ # 8. Post-processing
570
+ image = self.decode_latents(latents)
571
+
572
+ # 9. Run safety checker
573
+ image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
574
+
575
+ # 10. Convert to PIL
576
+ if output_type == "pil":
577
+ image = self.numpy_to_pil(image)
578
+
579
+ if not return_dict:
580
+ return (image, has_nsfw_concept)
581
+
582
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
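
Note: a minimal usage sketch for the composable pipeline ending above. The `|` separator and the `weights` string mirror the parsing in `__call__`, and the per-concept scores are combined as ε_uncond + Σ_i w_i (ε_i − ε_uncond); the community pipeline name below is an assumption based on this file's name.

```python
# Sketch only: assumes the pipeline is loadable as the community pipeline "composable_stable_diffusion".
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="composable_stable_diffusion",  # assumed name, matching this file
    torch_dtype=torch.float16,
).to("cuda")

# "|" separates the concepts to compose; weights is a matching "|"-separated string
# (if omitted, every concept gets guidance_scale as its weight).
image = pipe(
    prompt="a red sports car | a snowy mountain road",
    weights="7.5 | 7.5",
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
image.save("composed.png")
```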
v0.26.3/ddim_noise_comparative_analysis.py ADDED
@@ -0,0 +1,190 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import List, Optional, Tuple, Union
16
+
17
+ import PIL.Image
18
+ import torch
19
+ from torchvision import transforms
20
+
21
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
22
+ from diffusers.schedulers import DDIMScheduler
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ trans = transforms.Compose(
27
+ [
28
+ transforms.Resize((256, 256)),
29
+ transforms.ToTensor(),
30
+ transforms.Normalize([0.5], [0.5]),
31
+ ]
32
+ )
33
+
34
+
35
+ def preprocess(image):
36
+ if isinstance(image, torch.Tensor):
37
+ return image
38
+ elif isinstance(image, PIL.Image.Image):
39
+ image = [image]
40
+
41
+ image = [trans(img.convert("RGB")) for img in image]
42
+ image = torch.stack(image)
43
+ return image
44
+
45
+
46
+ class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
47
+ r"""
48
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
49
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
50
+
51
+ Parameters:
52
+ unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
53
+ scheduler ([`SchedulerMixin`]):
54
+ A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
55
+ [`DDPMScheduler`], or [`DDIMScheduler`].
56
+ """
57
+
58
+ def __init__(self, unet, scheduler):
59
+ super().__init__()
60
+
61
+ # make sure scheduler can always be converted to DDIM
62
+ scheduler = DDIMScheduler.from_config(scheduler.config)
63
+
64
+ self.register_modules(unet=unet, scheduler=scheduler)
65
+
66
+ def check_inputs(self, strength):
67
+ if strength < 0 or strength > 1:
68
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
69
+
70
+ def get_timesteps(self, num_inference_steps, strength, device):
71
+ # get the original timestep using init_timestep
72
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
73
+
74
+ t_start = max(num_inference_steps - init_timestep, 0)
75
+ timesteps = self.scheduler.timesteps[t_start:]
76
+
77
+ return timesteps, num_inference_steps - t_start
78
+
79
+ def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
80
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
81
+ raise ValueError(
82
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
83
+ )
84
+
85
+ init_latents = image.to(device=device, dtype=dtype)
86
+
87
+ if isinstance(generator, list) and len(generator) != batch_size:
88
+ raise ValueError(
89
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
90
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
91
+ )
92
+
93
+ shape = init_latents.shape
94
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
95
+
96
+ # get latents
97
+ print("add noise to latents at timestep", timestep)
98
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
99
+ latents = init_latents
100
+
101
+ return latents
102
+
103
+ @torch.no_grad()
104
+ def __call__(
105
+ self,
106
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
107
+ strength: float = 0.8,
108
+ batch_size: int = 1,
109
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
110
+ eta: float = 0.0,
111
+ num_inference_steps: int = 50,
112
+ use_clipped_model_output: Optional[bool] = None,
113
+ output_type: Optional[str] = "pil",
114
+ return_dict: bool = True,
115
+ ) -> Union[ImagePipelineOutput, Tuple]:
116
+ r"""
117
+ Args:
118
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
119
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
120
+ process.
121
+ strength (`float`, *optional*, defaults to 0.8):
122
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
123
+ will be used as a starting point, adding more noise to it the larger the `strength`. The number of
124
+ denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
125
+ be maximum and the denoising process will run for the full number of iterations specified in
126
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
127
+ batch_size (`int`, *optional*, defaults to 1):
128
+ The number of images to generate.
129
+ generator (`torch.Generator`, *optional*):
130
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
131
+ to make generation deterministic.
132
+ eta (`float`, *optional*, defaults to 0.0):
133
+ The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
134
+ num_inference_steps (`int`, *optional*, defaults to 50):
135
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
136
+ expense of slower inference.
137
+ use_clipped_model_output (`bool`, *optional*, defaults to `None`):
138
+ if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
139
+ downstream to the scheduler. So use `None` for schedulers which don't support this argument.
140
+ output_type (`str`, *optional*, defaults to `"pil"`):
141
+ The output format of the generate image. Choose between
142
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
143
+ return_dict (`bool`, *optional*, defaults to `True`):
144
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
145
+
146
+ Returns:
147
+ [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
148
+ True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
149
+ """
150
+ # 1. Check inputs. Raise error if not correct
151
+ self.check_inputs(strength)
152
+
153
+ # 2. Preprocess image
154
+ image = preprocess(image)
155
+
156
+ # 3. set timesteps
157
+ self.scheduler.set_timesteps(num_inference_steps, device=self.device)
158
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
159
+ latent_timestep = timesteps[:1].repeat(batch_size)
160
+
161
+ # 4. Prepare latent variables
162
+ latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
163
+ image = latents
164
+
165
+ # 5. Denoising loop
166
+ for t in self.progress_bar(timesteps):
167
+ # 1. predict noise model_output
168
+ model_output = self.unet(image, t).sample
169
+
170
+ # 2. predict previous mean of image x_t-1 and add variance depending on eta
171
+ # eta corresponds to η in paper and should be between [0, 1]
172
+ # do x_t -> x_t-1
173
+ image = self.scheduler.step(
174
+ model_output,
175
+ t,
176
+ image,
177
+ eta=eta,
178
+ use_clipped_model_output=use_clipped_model_output,
179
+ generator=generator,
180
+ ).prev_sample
181
+
182
+ image = (image / 2 + 0.5).clamp(0, 1)
183
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
184
+ if output_type == "pil":
185
+ image = self.numpy_to_pil(image)
186
+
187
+ if not return_dict:
188
+ return (image, latent_timestep.item())
189
+
190
+ return ImagePipelineOutput(images=image)
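A minimal usage sketch for the noise comparative analysis pipeline above; the checkpoint `google/ddpm-celebahq-256`, the input file name and the strength values are illustrative assumptions, not requirements of this file:

```py
from PIL import Image

from diffusers import DDPMScheduler, UNet2DModel

# any unconditional UNet2DModel checkpoint with a matching scheduler config should work here
unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")  # converted to DDIM inside the pipeline
pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler).to("cuda")

init_image = Image.open("sample.png")  # hypothetical input; `preprocess` resizes it to 256x256
for strength in (0.25, 0.5, 0.75):
    # larger strength -> noise is added at a later timestep, so more of the image is re-generated
    images, noised_at = pipe(image=init_image, strength=strength, num_inference_steps=50, return_dict=False)
    images[0].save(f"denoised_strength_{strength}.png")
```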
v0.26.3/dps_pipeline.py ADDED
@@ -0,0 +1,466 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ from math import pi
17
+ from typing import Callable, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ from PIL import Image
22
+
23
+ from diffusers import DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DModel
24
+ from diffusers.utils.torch_utils import randn_tensor
25
+
26
+
27
+ class DPSPipeline(DiffusionPipeline):
28
+ r"""
29
+ Pipeline for Diffusion Posterior Sampling.
30
+
31
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
32
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
33
+
34
+ Parameters:
35
+ unet ([`UNet2DModel`]):
36
+ A `UNet2DModel` to denoise the encoded image latents.
37
+ scheduler ([`SchedulerMixin`]):
38
+ A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
39
+ [`DDPMScheduler`], or [`DDIMScheduler`].
40
+ """
41
+
42
+ model_cpu_offload_seq = "unet"
43
+
44
+ def __init__(self, unet, scheduler):
45
+ super().__init__()
46
+ self.register_modules(unet=unet, scheduler=scheduler)
47
+
48
+ @torch.no_grad()
49
+ def __call__(
50
+ self,
51
+ measurement: torch.Tensor,
52
+ operator: torch.nn.Module,
53
+ loss_fn: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
54
+ batch_size: int = 1,
55
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
56
+ num_inference_steps: int = 1000,
57
+ output_type: Optional[str] = "pil",
58
+ return_dict: bool = True,
59
+ zeta: float = 0.3,
60
+ ) -> Union[ImagePipelineOutput, Tuple]:
61
+ r"""
62
+ The call function to the pipeline for generation.
63
+
64
+ Args:
65
+ measurement (`torch.Tensor`, *required*):
66
+ A 'torch.Tensor', the corrupted image
67
+ operator (`torch.nn.Module`, *required*):
68
+ A 'torch.nn.Module', the operator generating the corrupted image
69
+ loss_fn (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *required*):
70
+ A 'Callable[[torch.Tensor, torch.Tensor], torch.Tensor]', the loss function used
71
+ between the observed and predicted measurements; RMSE works well in most cases.
72
+ batch_size (`int`, *optional*, defaults to 1):
73
+ The number of images to generate.
74
+ generator (`torch.Generator`, *optional*):
75
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
76
+ generation deterministic.
77
+ num_inference_steps (`int`, *optional*, defaults to 1000):
78
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
79
+ expense of slower inference.
80
+ output_type (`str`, *optional*, defaults to `"pil"`):
81
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
82
+ return_dict (`bool`, *optional*, defaults to `True`):
83
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
84
+
85
+ Example:
86
+
87
+ ```py
88
+ >>> from diffusers import DDPMScheduler, UNet2DModel
89
+
90
+ >>> # load an unconditional model and scheduler, then build the DPS pipeline
91
+ >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
92
+ >>> model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")
93
+ >>> pipe = DPSPipeline(model, scheduler)
94
+
95
+ >>> # `measurement`, `operator` and `loss_fn` are described above; see the end of this file for a full example
96
+ >>> image = pipe(measurement=measurement, operator=operator, loss_fn=RMSELoss, zeta=1.0).images[0]
97
+ >>> image.save("dps_generated_image.png")
98
+ ```
99
+
100
+ Returns:
101
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
102
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
103
+ returned where the first element is a list with the generated images
104
+ """
105
+ # Sample gaussian noise to begin loop
106
+ if isinstance(self.unet.config.sample_size, int):
107
+ image_shape = (
108
+ batch_size,
109
+ self.unet.config.in_channels,
110
+ self.unet.config.sample_size,
111
+ self.unet.config.sample_size,
112
+ )
113
+ else:
114
+ image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
115
+
116
+ if self.device.type == "mps":
117
+ # randn does not work reproducibly on mps
118
+ image = randn_tensor(image_shape, generator=generator)
119
+ image = image.to(self.device)
120
+ else:
121
+ image = randn_tensor(image_shape, generator=generator, device=self.device)
122
+
123
+ # set step values
124
+ self.scheduler.set_timesteps(num_inference_steps)
125
+
126
+ for t in self.progress_bar(self.scheduler.timesteps):
127
+ with torch.enable_grad():
128
+ # 1. predict noise model_output
129
+ image = image.requires_grad_()
130
+ model_output = self.unet(image, t).sample
131
+
132
+ # 2. compute previous image x'_{t-1} and original prediction x0_{t}
133
+ scheduler_out = self.scheduler.step(model_output, t, image, generator=generator)
134
+ image_pred, origi_pred = scheduler_out.prev_sample, scheduler_out.pred_original_sample
135
+
136
+ # 3. compute y'_t = f(x0_{t})
137
+ measurement_pred = operator(origi_pred)
138
+
139
+ # 4. compute loss = d(y, y'_t-1)
140
+ loss = loss_fn(measurement, measurement_pred)
141
+ loss.backward()
142
+
143
+ print("distance: {0:.4f}".format(loss.item()))
144
+
145
+ with torch.no_grad():
146
+ image_pred = image_pred - zeta * image.grad
147
+ image = image_pred.detach()
148
+
149
+ image = (image / 2 + 0.5).clamp(0, 1)
150
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
151
+ if output_type == "pil":
152
+ image = self.numpy_to_pil(image)
153
+
154
+ if not return_dict:
155
+ return (image,)
156
+
157
+ return ImagePipelineOutput(images=image)
158
+
159
+
160
+ if __name__ == "__main__":
161
+ import scipy
162
+ from torch import nn
163
+ from torchvision.utils import save_image
164
+
165
+ # defining the operators f(.) of y = f(x)
166
+ # super-resolution operator
167
+ class SuperResolutionOperator(nn.Module):
168
+ def __init__(self, in_shape, scale_factor):
169
+ super().__init__()
170
+
171
+ # Resizer local class, do not use outside the SR operator class
172
+ class Resizer(nn.Module):
173
+ def __init__(self, in_shape, scale_factor=None, output_shape=None, kernel=None, antialiasing=True):
174
+ super(Resizer, self).__init__()
175
+
176
+ # First standardize values and fill missing arguments (if needed) by deriving scale from output shape or vice versa
177
+ scale_factor, output_shape = self.fix_scale_and_size(in_shape, output_shape, scale_factor)
178
+
179
+ # Choose interpolation method, each method has the matching kernel size
180
+ def cubic(x):
181
+ absx = np.abs(x)
182
+ absx2 = absx**2
183
+ absx3 = absx**3
184
+ return (1.5 * absx3 - 2.5 * absx2 + 1) * (absx <= 1) + (
185
+ -0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2
186
+ ) * ((1 < absx) & (absx <= 2))
187
+
188
+ def lanczos2(x):
189
+ return (
190
+ (np.sin(pi * x) * np.sin(pi * x / 2) + np.finfo(np.float32).eps)
191
+ / ((pi**2 * x**2 / 2) + np.finfo(np.float32).eps)
192
+ ) * (abs(x) < 2)
193
+
194
+ def box(x):
195
+ return ((-0.5 <= x) & (x < 0.5)) * 1.0
196
+
197
+ def lanczos3(x):
198
+ return (
199
+ (np.sin(pi * x) * np.sin(pi * x / 3) + np.finfo(np.float32).eps)
200
+ / ((pi**2 * x**2 / 3) + np.finfo(np.float32).eps)
201
+ ) * (abs(x) < 3)
202
+
203
+ def linear(x):
204
+ return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
205
+
206
+ method, kernel_width = {
207
+ "cubic": (cubic, 4.0),
208
+ "lanczos2": (lanczos2, 4.0),
209
+ "lanczos3": (lanczos3, 6.0),
210
+ "box": (box, 1.0),
211
+ "linear": (linear, 2.0),
212
+ None: (cubic, 4.0), # set default interpolation method as cubic
213
+ }.get(kernel)
214
+
215
+ # Antialiasing is only used when downscaling
216
+ antialiasing *= np.any(np.array(scale_factor) < 1)
217
+
218
+ # Sort indices of dimensions according to scale of each dimension. since we are going dim by dim this is efficient
219
+ sorted_dims = np.argsort(np.array(scale_factor))
220
+ self.sorted_dims = [int(dim) for dim in sorted_dims if scale_factor[dim] != 1]
221
+
222
+ # Iterate over dimensions to calculate local weights for resizing and resize each time in one direction
223
+ field_of_view_list = []
224
+ weights_list = []
225
+ for dim in self.sorted_dims:
226
+ # for each coordinate (along 1 dim), calculate which coordinates in the input image affect its result and the
227
+ # weights that multiply the values there to get its result.
228
+ weights, field_of_view = self.contributions(
229
+ in_shape[dim], output_shape[dim], scale_factor[dim], method, kernel_width, antialiasing
230
+ )
231
+
232
+ # convert to torch tensor
233
+ weights = torch.tensor(weights.T, dtype=torch.float32)
234
+
235
+ # We add singleton dimensions to the weight matrix so we can multiply it with the big tensor we get for
236
+ # tmp_im[field_of_view.T], (bsxfun style)
237
+ weights_list.append(
238
+ nn.Parameter(
239
+ torch.reshape(weights, list(weights.shape) + (len(scale_factor) - 1) * [1]),
240
+ requires_grad=False,
241
+ )
242
+ )
243
+ field_of_view_list.append(
244
+ nn.Parameter(
245
+ torch.tensor(field_of_view.T.astype(np.int32), dtype=torch.long), requires_grad=False
246
+ )
247
+ )
248
+
249
+ self.field_of_view = nn.ParameterList(field_of_view_list)
250
+ self.weights = nn.ParameterList(weights_list)
251
+
252
+ def forward(self, in_tensor):
253
+ x = in_tensor
254
+
255
+ # Use the affecting position values and the set of weights to calculate the result of resizing along this 1 dim
256
+ for dim, fov, w in zip(self.sorted_dims, self.field_of_view, self.weights):
257
+ # To be able to act on each dim, we swap so that dim 0 is the wanted dim to resize
258
+ x = torch.transpose(x, dim, 0)
259
+
260
+ # This is a bit of a complicated multiplication: x[field_of_view.T] is a tensor of order image_dims+1.
261
+ # for each pixel in the output image it matches the positions that influence it from the input image (along 1 dim
262
+ # only, this is why it only adds 1 dim to the shape). We then multiply, for each pixel, its set of positions with
263
+ # the matching set of weights. we do this by this big tensor element-wise multiplication (MATLAB bsxfun style:
264
+ # matching dims are multiplied element-wise while singletons mean that the matching dim is all multiplied by the
265
+ # same number)
266
+ x = torch.sum(x[fov] * w, dim=0)
267
+
268
+ # Finally we swap back the axes to the original order
269
+ x = torch.transpose(x, dim, 0)
270
+
271
+ return x
272
+
273
+ def fix_scale_and_size(self, input_shape, output_shape, scale_factor):
274
+ # First fix the scale-factor (if given) to the standardized form the function expects (a list of scale factors of the
275
+ # same length as the number of input dimensions)
276
+ if scale_factor is not None:
277
+ # By default, if scale-factor is a scalar we assume 2d resizing and duplicate it.
278
+ if np.isscalar(scale_factor) and len(input_shape) > 1:
279
+ scale_factor = [scale_factor, scale_factor]
280
+
281
+ # We extend the size of scale-factor list to the size of the input by assigning 1 to all the unspecified scales
282
+ scale_factor = list(scale_factor)
283
+ scale_factor = [1] * (len(input_shape) - len(scale_factor)) + scale_factor
284
+
285
+ # Fixing output-shape (if given): extending it to the size of the input-shape, by assigning the original input-size
286
+ # to all the unspecified dimensions
287
+ if output_shape is not None:
288
+ output_shape = list(input_shape[len(output_shape) :]) + list(np.uint(np.array(output_shape)))
289
+
290
+ # Dealing with the case of a non-given scale-factor, calculating according to output-shape. Note that this is
291
+ # sub-optimal, because there can be different scales to the same output-shape.
292
+ if scale_factor is None:
293
+ scale_factor = 1.0 * np.array(output_shape) / np.array(input_shape)
294
+
295
+ # Dealing with missing output-shape. calculating according to scale-factor
296
+ if output_shape is None:
297
+ output_shape = np.uint(np.ceil(np.array(input_shape) * np.array(scale_factor)))
298
+
299
+ return scale_factor, output_shape
300
+
301
+ def contributions(self, in_length, out_length, scale, kernel, kernel_width, antialiasing):
302
+ # This function calculates a set of 'filters' and a set of field_of_view that will later on be applied
303
+ # such that each position from the field_of_view will be multiplied with a matching filter from the
304
+ # 'weights' based on the interpolation method and the distance of the sub-pixel location from the pixel centers
305
+ # around it. This is only done for one dimension of the image.
306
+
307
+ # When anti-aliasing is activated (default and only for downscaling) the receptive field is stretched to size of
308
+ # 1/sf. This means the filtering acts more like a low-pass filter.
309
+ fixed_kernel = (lambda arg: scale * kernel(scale * arg)) if antialiasing else kernel
310
+ kernel_width *= 1.0 / scale if antialiasing else 1.0
311
+
312
+ # These are the coordinates of the output image
313
+ out_coordinates = np.arange(1, out_length + 1)
314
+
315
+ # since both scale-factor and output size can be provided simultaneously, preserving the center of the image requires shifting
316
+ # the output coordinates. The deviation is because out_length doesn't necessarily equal in_length*scale.
317
+ # To keep the center we need to subtract half of this deviation so that we get equal margins for both sides and the center is preserved.
318
+ shifted_out_coordinates = out_coordinates - (out_length - in_length * scale) / 2
319
+
320
+ # These are the matching positions of the output-coordinates on the input image coordinates.
321
+ # Best explained by example: say we have 4 horizontal pixels for HR and we downscale by SF=2 and get 2 pixels:
322
+ # [1,2,3,4] -> [1,2]. Remember each pixel number is the middle of the pixel.
323
+ # The scaling is done between the distances and not pixel numbers (the right boundary of pixel 4 is transformed to
324
+ # the right boundary of pixel 2. pixel 1 in the small image matches the boundary between pixels 1 and 2 in the big
325
+ # one and not to pixel 2. This means the position is not just multiplication of the old pos by scale-factor).
326
+ # So if we measure distance from the left border, middle of pixel 1 is at distance d=0.5, border between 1 and 2 is
327
+ # at d=1, and so on (d = p - 0.5). we calculate (d_new = d_old / sf) which means:
328
+ # (p_new-0.5 = (p_old-0.5) / sf) -> p_new = p_old/sf + 0.5 * (1-1/sf)
329
+ match_coordinates = shifted_out_coordinates / scale + 0.5 * (1 - 1 / scale)
330
+
331
+ # This is the left boundary to start multiplying the filter from, it depends on the size of the filter
332
+ left_boundary = np.floor(match_coordinates - kernel_width / 2)
333
+
334
+ # Kernel width needs to be enlarged because when the coverage has sub-pixel borders, it must 'see' the pixel centers
335
+ # of the pixels it only covered a part from. So we add one pixel at each side to consider (weights can zeroize them)
336
+ expanded_kernel_width = np.ceil(kernel_width) + 2
337
+
338
+ # Determine a set of field_of_view for each output position, these are the pixels in the input image
339
+ # that the pixel in the output image 'sees'. We get a matrix whose horizontal dim is the output pixels (big) and the
340
+ # vertical dim is the pixels it 'sees' (kernel_size + 2)
341
+ field_of_view = np.squeeze(
342
+ np.int16(np.expand_dims(left_boundary, axis=1) + np.arange(expanded_kernel_width) - 1)
343
+ )
344
+
345
+ # Assign weight to each pixel in the field of view. A matrix whose horizontal dim is the output pixels and the
346
+ # vertical dim is a list of weights matching to the pixel in the field of view (that are specified in
347
+ # 'field_of_view')
348
+ weights = fixed_kernel(1.0 * np.expand_dims(match_coordinates, axis=1) - field_of_view - 1)
349
+
350
+ # Normalize weights to sum up to 1. be careful from dividing by 0
351
+ sum_weights = np.sum(weights, axis=1)
352
+ sum_weights[sum_weights == 0] = 1.0
353
+ weights = 1.0 * weights / np.expand_dims(sum_weights, axis=1)
354
+
355
+ # We use this mirror structure as a trick for reflection padding at the boundaries
356
+ mirror = np.uint(np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))))
357
+ field_of_view = mirror[np.mod(field_of_view, mirror.shape[0])]
358
+
359
+ # Get rid of weights and pixel positions that are of zero weight
360
+ non_zero_out_pixels = np.nonzero(np.any(weights, axis=0))
361
+ weights = np.squeeze(weights[:, non_zero_out_pixels])
362
+ field_of_view = np.squeeze(field_of_view[:, non_zero_out_pixels])
363
+
364
+ # Final products are the relative positions and the matching weights, both are output_size X fixed_kernel_size
365
+ return weights, field_of_view
366
+
367
+ self.down_sample = Resizer(in_shape, 1 / scale_factor)
368
+ for param in self.parameters():
369
+ param.requires_grad = False
370
+
371
+ def forward(self, data, **kwargs):
372
+ return self.down_sample(data)
373
+
374
+ # Gaussian blurring operator
375
+ class GaussialBlurOperator(nn.Module):
376
+ def __init__(self, kernel_size, intensity):
377
+ super().__init__()
378
+
379
+ class Blurkernel(nn.Module):
380
+ def __init__(self, blur_type="gaussian", kernel_size=31, std=3.0):
381
+ super().__init__()
382
+ self.blur_type = blur_type
383
+ self.kernel_size = kernel_size
384
+ self.std = std
385
+ self.seq = nn.Sequential(
386
+ nn.ReflectionPad2d(self.kernel_size // 2),
387
+ nn.Conv2d(3, 3, self.kernel_size, stride=1, padding=0, bias=False, groups=3),
388
+ )
389
+ self.weights_init()
390
+
391
+ def forward(self, x):
392
+ return self.seq(x)
393
+
394
+ def weights_init(self):
395
+ if self.blur_type == "gaussian":
396
+ n = np.zeros((self.kernel_size, self.kernel_size))
397
+ n[self.kernel_size // 2, self.kernel_size // 2] = 1
398
+ k = scipy.ndimage.gaussian_filter(n, sigma=self.std)
399
+ k = torch.from_numpy(k)
400
+ self.k = k
401
+ for name, f in self.named_parameters():
402
+ f.data.copy_(k)
403
+
404
+ def update_weights(self, k):
405
+ if not torch.is_tensor(k):
406
+ k = torch.from_numpy(k)
407
+ for name, f in self.named_parameters():
408
+ f.data.copy_(k)
409
+
410
+ def get_kernel(self):
411
+ return self.k
412
+
413
+ self.kernel_size = kernel_size
414
+ self.conv = Blurkernel(blur_type="gaussian", kernel_size=kernel_size, std=intensity)
415
+ self.kernel = self.conv.get_kernel()
416
+ self.conv.update_weights(self.kernel.type(torch.float32))
417
+
418
+ for param in self.parameters():
419
+ param.requires_grad = False
420
+
421
+ def forward(self, data, **kwargs):
422
+ return self.conv(data)
423
+
424
+ def transpose(self, data, **kwargs):
425
+ return data
426
+
427
+ def get_kernel(self):
428
+ return self.kernel.view(1, 1, self.kernel_size, self.kernel_size)
429
+
430
+ # assuming the forward process y = f(x) is polluted by Gaussian noise, use l2 norm
431
+ def RMSELoss(yhat, y):
432
+ return torch.sqrt(torch.sum((yhat - y) ** 2))
433
+
434
+ # set up source image
435
+ src = Image.open("sample.png")
436
+ # read image into [1,3,H,W]
437
+ src = torch.from_numpy(np.array(src, dtype=np.float32)).permute(2, 0, 1)[None]
438
+ # normalize image to [-1,1]
439
+ src = (src / 127.5) - 1.0
440
+ src = src.to("cuda")
441
+
442
+ # set up operator and measurement
443
+ # operator = SuperResolutionOperator(in_shape=src.shape, scale_factor=4).to("cuda")
444
+ operator = GaussialBlurOperator(kernel_size=61, intensity=3.0).to("cuda")
445
+ measurement = operator(src)
446
+
447
+ # set up scheduler
448
+ scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
449
+ scheduler.set_timesteps(1000)
450
+
451
+ # set up model
452
+ model = UNet2DModel.from_pretrained("google/ddpm-celebahq-256").to("cuda")
453
+
454
+ save_image((src + 1.0) / 2.0, "dps_src.png")
455
+ save_image((measurement + 1.0) / 2.0, "dps_mea.png")
456
+
457
+ # finally, the pipeline
458
+ dpspipe = DPSPipeline(model, scheduler)
459
+ image = dpspipe(
460
+ measurement=measurement,
461
+ operator=operator,
462
+ loss_fn=RMSELoss,
463
+ zeta=1.0,
464
+ ).images[0]
465
+
466
+ image.save("dps_generated_image.png")
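The `__main__` block above exercises the pipeline with the two operators bundled in this file; any differentiable `nn.Module` mapping the clean image to its measurement can be plugged in instead. A minimal sketch of a custom masking operator (the keep ratio and the usage lines are illustrative assumptions):

```py
import torch
from torch import nn


class RandomMaskOperator(nn.Module):
    """Sketch of a custom measurement operator for DPSPipeline: keep only a random subset of pixels."""

    def __init__(self, image_shape, keep_ratio=0.5):
        super().__init__()
        # fixed random mask, registered as a buffer so it follows .to(device) and is never trained
        mask = (torch.rand(image_shape) < keep_ratio).float()
        self.register_buffer("mask", mask)

    def forward(self, data, **kwargs):
        # y = f(x): zero out the dropped pixels; the operation stays differentiable w.r.t. `data`
        return data * self.mask


# usage sketch: operator = RandomMaskOperator(src.shape).to("cuda"); measurement = operator(src)
# then pass `operator`, `measurement` and a loss such as RMSELoss to DPSPipeline as shown above
```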
v0.26.3/edict_pipeline.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Optional
2
+
3
+ import torch
4
+ from PIL import Image
5
+ from tqdm.auto import tqdm
6
+ from transformers import CLIPTextModel, CLIPTokenizer
7
+
8
+ from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel
9
+ from diffusers.image_processor import VaeImageProcessor
10
+ from diffusers.utils import (
11
+ deprecate,
12
+ )
13
+
14
+
15
+ class EDICTPipeline(DiffusionPipeline):
16
+ def __init__(
17
+ self,
18
+ vae: AutoencoderKL,
19
+ text_encoder: CLIPTextModel,
20
+ tokenizer: CLIPTokenizer,
21
+ unet: UNet2DConditionModel,
22
+ scheduler: DDIMScheduler,
23
+ mixing_coeff: float = 0.93,
24
+ leapfrog_steps: bool = True,
25
+ ):
26
+ self.mixing_coeff = mixing_coeff
27
+ self.leapfrog_steps = leapfrog_steps
28
+
29
+ super().__init__()
30
+ self.register_modules(
31
+ vae=vae,
32
+ text_encoder=text_encoder,
33
+ tokenizer=tokenizer,
34
+ unet=unet,
35
+ scheduler=scheduler,
36
+ )
37
+
38
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
39
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
40
+
41
+ def _encode_prompt(
42
+ self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False
43
+ ):
44
+ text_inputs = self.tokenizer(
45
+ prompt,
46
+ padding="max_length",
47
+ max_length=self.tokenizer.model_max_length,
48
+ truncation=True,
49
+ return_tensors="pt",
50
+ )
51
+
52
+ prompt_embeds = self.text_encoder(text_inputs.input_ids.to(self.device)).last_hidden_state
53
+
54
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=self.device)
55
+
56
+ if do_classifier_free_guidance:
57
+ uncond_tokens = "" if negative_prompt is None else negative_prompt
58
+
59
+ uncond_input = self.tokenizer(
60
+ uncond_tokens,
61
+ padding="max_length",
62
+ max_length=self.tokenizer.model_max_length,
63
+ truncation=True,
64
+ return_tensors="pt",
65
+ )
66
+
67
+ negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device)).last_hidden_state
68
+
69
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
70
+
71
+ return prompt_embeds
72
+
73
+ def denoise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
74
+ x = self.mixing_coeff * x + (1 - self.mixing_coeff) * y
75
+ y = self.mixing_coeff * y + (1 - self.mixing_coeff) * x
76
+
77
+ return [x, y]
78
+
79
+ def noise_mixing_layer(self, x: torch.Tensor, y: torch.Tensor):
80
+ y = (y - (1 - self.mixing_coeff) * x) / self.mixing_coeff
81
+ x = (x - (1 - self.mixing_coeff) * y) / self.mixing_coeff
82
+
83
+ return [x, y]
84
+
85
+ def _get_alpha_and_beta(self, t: torch.Tensor):
86
+ # cast to a Python int because self.scheduler.alphas_cumprod always lives on the CPU
87
+ t = int(t)
88
+
89
+ alpha_prod = self.scheduler.alphas_cumprod[t] if t >= 0 else self.scheduler.final_alpha_cumprod
90
+
91
+ return alpha_prod, 1 - alpha_prod
92
+
93
+ def noise_step(
94
+ self,
95
+ base: torch.Tensor,
96
+ model_input: torch.Tensor,
97
+ model_output: torch.Tensor,
98
+ timestep: torch.Tensor,
99
+ ):
100
+ prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
101
+
102
+ alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
103
+ alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
104
+
105
+ a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
106
+ b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
107
+
108
+ next_model_input = (base - b_t * model_output) / a_t
109
+
110
+ return model_input, next_model_input.to(base.dtype)
111
+
112
+ def denoise_step(
113
+ self,
114
+ base: torch.Tensor,
115
+ model_input: torch.Tensor,
116
+ model_output: torch.Tensor,
117
+ timestep: torch.Tensor,
118
+ ):
119
+ prev_timestep = timestep - self.scheduler.config.num_train_timesteps / self.scheduler.num_inference_steps
120
+
121
+ alpha_prod_t, beta_prod_t = self._get_alpha_and_beta(timestep)
122
+ alpha_prod_t_prev, beta_prod_t_prev = self._get_alpha_and_beta(prev_timestep)
123
+
124
+ a_t = (alpha_prod_t_prev / alpha_prod_t) ** 0.5
125
+ b_t = -a_t * (beta_prod_t**0.5) + beta_prod_t_prev**0.5
126
+ next_model_input = a_t * base + b_t * model_output
127
+
128
+ return model_input, next_model_input.to(base.dtype)
129
+
130
+ @torch.no_grad()
131
+ def decode_latents(self, latents: torch.Tensor):
132
+ latents = 1 / self.vae.config.scaling_factor * latents
133
+ image = self.vae.decode(latents).sample
134
+ image = (image / 2 + 0.5).clamp(0, 1)
135
+ return image
136
+
137
+ @torch.no_grad()
138
+ def prepare_latents(
139
+ self,
140
+ image: Image.Image,
141
+ text_embeds: torch.Tensor,
142
+ timesteps: torch.Tensor,
143
+ guidance_scale: float,
144
+ generator: Optional[torch.Generator] = None,
145
+ ):
146
+ do_classifier_free_guidance = guidance_scale > 1.0
147
+
148
+ image = image.to(device=self.device, dtype=text_embeds.dtype)
149
+ latent = self.vae.encode(image).latent_dist.sample(generator)
150
+
151
+ latent = self.vae.config.scaling_factor * latent
152
+
153
+ coupled_latents = [latent.clone(), latent.clone()]
154
+
155
+ for i, t in tqdm(enumerate(timesteps), total=len(timesteps)):
156
+ coupled_latents = self.noise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
157
+
158
+ # j - model_input index, k - base index
159
+ for j in range(2):
160
+ k = j ^ 1
161
+
162
+ if self.leapfrog_steps:
163
+ if i % 2 == 0:
164
+ k, j = j, k
165
+
166
+ model_input = coupled_latents[j]
167
+ base = coupled_latents[k]
168
+
169
+ latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
170
+
171
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeds).sample
172
+
173
+ if do_classifier_free_guidance:
174
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
175
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
176
+
177
+ base, model_input = self.noise_step(
178
+ base=base,
179
+ model_input=model_input,
180
+ model_output=noise_pred,
181
+ timestep=t,
182
+ )
183
+
184
+ coupled_latents[k] = model_input
185
+
186
+ return coupled_latents
187
+
188
+ @torch.no_grad()
189
+ def __call__(
190
+ self,
191
+ base_prompt: str,
192
+ target_prompt: str,
193
+ image: Image.Image,
194
+ guidance_scale: float = 3.0,
195
+ num_inference_steps: int = 50,
196
+ strength: float = 0.8,
197
+ negative_prompt: Optional[str] = None,
198
+ generator: Optional[torch.Generator] = None,
199
+ output_type: Optional[str] = "pil",
200
+ ):
201
+ do_classifier_free_guidance = guidance_scale > 1.0
202
+
203
+ image = self.image_processor.preprocess(image)
204
+
205
+ base_embeds = self._encode_prompt(base_prompt, negative_prompt, do_classifier_free_guidance)
206
+ target_embeds = self._encode_prompt(target_prompt, negative_prompt, do_classifier_free_guidance)
207
+
208
+ self.scheduler.set_timesteps(num_inference_steps, self.device)
209
+
210
+ t_limit = num_inference_steps - int(num_inference_steps * strength)
211
+ fwd_timesteps = self.scheduler.timesteps[t_limit:]
212
+ bwd_timesteps = fwd_timesteps.flip(0)
213
+
214
+ coupled_latents = self.prepare_latents(image, base_embeds, bwd_timesteps, guidance_scale, generator)
215
+
216
+ for i, t in tqdm(enumerate(fwd_timesteps), total=len(fwd_timesteps)):
217
+ # j - model_input index, k - base index
218
+ for k in range(2):
219
+ j = k ^ 1
220
+
221
+ if self.leapfrog_steps:
222
+ if i % 2 == 1:
223
+ k, j = j, k
224
+
225
+ model_input = coupled_latents[j]
226
+ base = coupled_latents[k]
227
+
228
+ latent_model_input = torch.cat([model_input] * 2) if do_classifier_free_guidance else model_input
229
+
230
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=target_embeds).sample
231
+
232
+ if do_classifier_free_guidance:
233
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
234
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
235
+
236
+ base, model_input = self.denoise_step(
237
+ base=base,
238
+ model_input=model_input,
239
+ model_output=noise_pred,
240
+ timestep=t,
241
+ )
242
+
243
+ coupled_latents[k] = model_input
244
+
245
+ coupled_latents = self.denoise_mixing_layer(x=coupled_latents[0], y=coupled_latents[1])
246
+
247
+ # either one is fine
248
+ final_latent = coupled_latents[0]
249
+
250
+ if output_type not in ["latent", "pt", "np", "pil"]:
251
+ deprecation_message = (
252
+ f"the output_type {output_type} is outdated. Please make sure to set it to one of these instead: "
253
+ "`pil`, `np`, `pt`, `latent`"
254
+ )
255
+ deprecate("Unsupported output_type", "1.0.0", deprecation_message, standard_warn=False)
256
+ output_type = "np"
257
+
258
+ if output_type == "latent":
259
+ image = final_latent
260
+ else:
261
+ image = self.decode_latents(final_latent)
262
+ image = self.image_processor.postprocess(image, output_type=output_type)
263
+
264
+ return image
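A minimal usage sketch for the EDICT pipeline above, assuming Stable Diffusion v1 weights; the checkpoint id, prompts and file names are illustrative, not requirements of this file:

```py
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel

model_id = "CompVis/stable-diffusion-v1-4"  # illustrative; any SD 1.x checkpoint laid out in subfolders should work
vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae")
text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")
tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer")
unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")

pipe = EDICTPipeline(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler).to("cuda")

source = Image.open("dog.png").convert("RGB")       # hypothetical input image
edited = pipe(
    base_prompt="a photo of a dog",                 # describes the input image
    target_prompt="a photo of a golden retriever",  # describes the desired edit
    image=source,
    guidance_scale=3.0,
    strength=0.8,
)[0]  # with the default output_type="pil" a list of PIL images is returned
edited.save("edict_edit.png")
```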
v0.26.3/gluegen.py ADDED
@@ -0,0 +1,865 @@
1
+ import inspect
2
+ from typing import Any, Dict, List, Optional, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from transformers import AutoModel, AutoTokenizer, CLIPImageProcessor
7
+
8
+ from diffusers import DiffusionPipeline
9
+ from diffusers.image_processor import VaeImageProcessor
10
+ from diffusers.loaders import LoraLoaderMixin
11
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
13
+ from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
14
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
15
+ from diffusers.schedulers import KarrasDiffusionSchedulers
16
+ from diffusers.utils import (
17
+ USE_PEFT_BACKEND,
18
+ logging,
19
+ scale_lora_layers,
20
+ unscale_lora_layers,
21
+ )
22
+ from diffusers.utils.torch_utils import randn_tensor
23
+
24
+
25
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
26
+
27
+
28
+ class TranslatorBase(nn.Module):
29
+ def __init__(self, num_tok, dim, dim_out, mult=2):
30
+ super().__init__()
31
+
32
+ self.dim_in = dim
33
+ self.dim_out = dim_out
34
+
35
+ self.net_tok = nn.Sequential(
36
+ nn.Linear(num_tok, int(num_tok * mult)),
37
+ nn.LayerNorm(int(num_tok * mult)),
38
+ nn.GELU(),
39
+ nn.Linear(int(num_tok * mult), int(num_tok * mult)),
40
+ nn.LayerNorm(int(num_tok * mult)),
41
+ nn.GELU(),
42
+ nn.Linear(int(num_tok * mult), num_tok),
43
+ nn.LayerNorm(num_tok),
44
+ )
45
+
46
+ self.net_sen = nn.Sequential(
47
+ nn.Linear(dim, int(dim * mult)),
48
+ nn.LayerNorm(int(dim * mult)),
49
+ nn.GELU(),
50
+ nn.Linear(int(dim * mult), int(dim * mult)),
51
+ nn.LayerNorm(int(dim * mult)),
52
+ nn.GELU(),
53
+ nn.Linear(int(dim * mult), dim_out),
54
+ nn.LayerNorm(dim_out),
55
+ )
56
+
57
+ def forward(self, x):
58
+ if self.dim_in == self.dim_out:
59
+ indentity_0 = x
60
+ x = self.net_sen(x)
61
+ x += indentity_0
62
+ x = x.transpose(1, 2)
63
+
64
+ indentity_1 = x
65
+ x = self.net_tok(x)
66
+ x += indentity_1
67
+ x = x.transpose(1, 2)
68
+ else:
69
+ x = self.net_sen(x)
70
+ x = x.transpose(1, 2)
71
+
72
+ x = self.net_tok(x)
73
+ x = x.transpose(1, 2)
74
+ return x
75
+
76
+
77
+ class TranslatorBaseNoLN(nn.Module):
78
+ def __init__(self, num_tok, dim, dim_out, mult=2):
79
+ super().__init__()
80
+
81
+ self.dim_in = dim
82
+ self.dim_out = dim_out
83
+
84
+ self.net_tok = nn.Sequential(
85
+ nn.Linear(num_tok, int(num_tok * mult)),
86
+ nn.GELU(),
87
+ nn.Linear(int(num_tok * mult), int(num_tok * mult)),
88
+ nn.GELU(),
89
+ nn.Linear(int(num_tok * mult), num_tok),
90
+ )
91
+
92
+ self.net_sen = nn.Sequential(
93
+ nn.Linear(dim, int(dim * mult)),
94
+ nn.GELU(),
95
+ nn.Linear(int(dim * mult), int(dim * mult)),
96
+ nn.GELU(),
97
+ nn.Linear(int(dim * mult), dim_out),
98
+ )
99
+
100
+ def forward(self, x):
101
+ if self.dim_in == self.dim_out:
102
+ indentity_0 = x
103
+ x = self.net_sen(x)
104
+ x += indentity_0
105
+ x = x.transpose(1, 2)
106
+
107
+ indentity_1 = x
108
+ x = self.net_tok(x)
109
+ x += indentity_1
110
+ x = x.transpose(1, 2)
111
+ else:
112
+ x = self.net_sen(x)
113
+ x = x.transpose(1, 2)
114
+
115
+ x = self.net_tok(x)
116
+ x = x.transpose(1, 2)
117
+ return x
118
+
119
+
120
+ class TranslatorNoLN(nn.Module):
121
+ def __init__(self, num_tok, dim, dim_out, mult=2, depth=5):
122
+ super().__init__()
123
+
124
+ self.blocks = nn.ModuleList([TranslatorBase(num_tok, dim, dim, mult=2) for d in range(depth)])
125
+ self.gelu = nn.GELU()
126
+
127
+ self.tail = TranslatorBaseNoLN(num_tok, dim, dim_out, mult=2)
128
+
129
+ def forward(self, x):
130
+ for block in self.blocks:
131
+ x = block(x) + x
132
+ x = self.gelu(x)
133
+
134
+ x = self.tail(x)
135
+ return x
136
+
137
+
138
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
139
+ """
140
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
141
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
142
+ """
143
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
144
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
145
+ # rescale the results from guidance (fixes overexposure)
146
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
147
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
148
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
149
+ return noise_cfg
150
+
151
+
152
+ def retrieve_timesteps(
153
+ scheduler,
154
+ num_inference_steps: Optional[int] = None,
155
+ device: Optional[Union[str, torch.device]] = None,
156
+ timesteps: Optional[List[int]] = None,
157
+ **kwargs,
158
+ ):
159
+ """
160
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
161
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
162
+
163
+ Args:
164
+ scheduler (`SchedulerMixin`):
165
+ The scheduler to get timesteps from.
166
+ num_inference_steps (`int`):
167
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
168
+ `timesteps` must be `None`.
169
+ device (`str` or `torch.device`, *optional*):
170
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
171
+ timesteps (`List[int]`, *optional*):
172
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
173
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
174
+ must be `None`.
175
+
176
+ Returns:
177
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
178
+ second element is the number of inference steps.
179
+ """
180
+ if timesteps is not None:
181
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
182
+ if not accepts_timesteps:
183
+ raise ValueError(
184
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
185
+ f" timestep schedules. Please check whether you are using the correct scheduler."
186
+ )
187
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
188
+ timesteps = scheduler.timesteps
189
+ num_inference_steps = len(timesteps)
190
+ else:
191
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
192
+ timesteps = scheduler.timesteps
193
+ return timesteps, num_inference_steps
194
+
195
+
196
+ class GlueGenStableDiffusionPipeline(DiffusionPipeline, LoraLoaderMixin):
197
+ def __init__(
198
+ self,
199
+ vae: AutoencoderKL,
200
+ text_encoder: AutoModel,
201
+ tokenizer: AutoTokenizer,
202
+ unet: UNet2DConditionModel,
203
+ scheduler: KarrasDiffusionSchedulers,
204
+ safety_checker: StableDiffusionSafetyChecker,
205
+ feature_extractor: CLIPImageProcessor,
206
+ language_adapter: TranslatorNoLN = None,
207
+ tensor_norm: torch.FloatTensor = None,
208
+ requires_safety_checker: bool = True,
209
+ ):
210
+ super().__init__()
211
+
212
+ self.register_modules(
213
+ vae=vae,
214
+ text_encoder=text_encoder,
215
+ tokenizer=tokenizer,
216
+ unet=unet,
217
+ scheduler=scheduler,
218
+ safety_checker=safety_checker,
219
+ feature_extractor=feature_extractor,
220
+ language_adapter=language_adapter,
221
+ tensor_norm=tensor_norm,
222
+ )
223
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
224
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
225
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
226
+
227
+ def load_language_adapter(
228
+ self,
229
+ model_path: str,
230
+ num_token: int,
231
+ dim: int,
232
+ dim_out: int,
233
+ tensor_norm: torch.FloatTensor,
234
+ mult: int = 2,
235
+ depth: int = 5,
236
+ ):
237
+ device = self._execution_device
238
+ self.tensor_norm = tensor_norm.to(device)
239
+ self.language_adapter = TranslatorNoLN(num_tok=num_token, dim=dim, dim_out=dim_out, mult=mult, depth=depth).to(
240
+ device
241
+ )
242
+ self.language_adapter.load_state_dict(torch.load(model_path))
243
+
244
+ def enable_vae_slicing(self):
245
+ r"""
246
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
247
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
248
+ """
249
+ self.vae.enable_slicing()
250
+
251
+ def disable_vae_slicing(self):
252
+ r"""
253
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
254
+ computing decoding in one step.
255
+ """
256
+ self.vae.disable_slicing()
257
+
258
+ def enable_vae_tiling(self):
259
+ r"""
260
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
261
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
262
+ processing larger images.
263
+ """
264
+ self.vae.enable_tiling()
265
+
266
+ def disable_vae_tiling(self):
267
+ r"""
268
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
269
+ computing decoding in one step.
270
+ """
271
+ self.vae.disable_tiling()
272
+
273
+ def _adapt_language(self, prompt_embeds: torch.FloatTensor):
274
+ prompt_embeds = prompt_embeds / 3
275
+ prompt_embeds = self.language_adapter(prompt_embeds) * (self.tensor_norm / 2)
276
+ return prompt_embeds
277
+
278
+ def encode_prompt(
279
+ self,
280
+ prompt,
281
+ device,
282
+ num_images_per_prompt,
283
+ do_classifier_free_guidance,
284
+ negative_prompt=None,
285
+ prompt_embeds: Optional[torch.FloatTensor] = None,
286
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
287
+ lora_scale: Optional[float] = None,
288
+ clip_skip: Optional[int] = None,
289
+ ):
290
+ r"""
291
+ Encodes the prompt into text encoder hidden states.
292
+
293
+ Args:
294
+ prompt (`str` or `List[str]`, *optional*):
295
+ prompt to be encoded
296
+ device: (`torch.device`):
297
+ torch device
298
+ num_images_per_prompt (`int`):
299
+ number of images that should be generated per prompt
300
+ do_classifier_free_guidance (`bool`):
301
+ whether to use classifier free guidance or not
302
+ negative_prompt (`str` or `List[str]`, *optional*):
303
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
304
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
305
+ less than `1`).
306
+ prompt_embeds (`torch.FloatTensor`, *optional*):
307
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
308
+ provided, text embeddings will be generated from `prompt` input argument.
309
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
310
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
311
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
312
+ argument.
313
+ lora_scale (`float`, *optional*):
314
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
315
+ clip_skip (`int`, *optional*):
316
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
317
+ the output of the pre-final layer will be used for computing the prompt embeddings.
318
+ """
319
+ # set lora scale so that monkey patched LoRA
320
+ # function of text encoder can correctly access it
321
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
322
+ self._lora_scale = lora_scale
323
+
324
+ # dynamically adjust the LoRA scale
325
+ if not USE_PEFT_BACKEND:
326
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
327
+ else:
328
+ scale_lora_layers(self.text_encoder, lora_scale)
329
+
330
+ if prompt is not None and isinstance(prompt, str):
331
+ batch_size = 1
332
+ elif prompt is not None and isinstance(prompt, list):
333
+ batch_size = len(prompt)
334
+ else:
335
+ batch_size = prompt_embeds.shape[0]
336
+
337
+ if prompt_embeds is None:
338
+ text_inputs = self.tokenizer(
339
+ prompt,
340
+ padding="max_length",
341
+ max_length=self.tokenizer.model_max_length,
342
+ truncation=True,
343
+ return_tensors="pt",
344
+ )
345
+ text_input_ids = text_inputs.input_ids
346
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
347
+
348
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
349
+ text_input_ids, untruncated_ids
350
+ ):
351
+ removed_text = self.tokenizer.batch_decode(
352
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
353
+ )
354
+ logger.warning(
355
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
356
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
357
+ )
358
+
359
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
360
+ attention_mask = text_inputs.attention_mask.to(device)
361
+ elif self.language_adapter is not None:
362
+ attention_mask = text_inputs.attention_mask.to(device)
363
+ else:
364
+ attention_mask = None
365
+
366
+ if clip_skip is None:
367
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
368
+ prompt_embeds = prompt_embeds[0]
369
+
370
+ else:
371
+ prompt_embeds = self.text_encoder(
372
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
373
+ )
374
+ # Access the `hidden_states` first, that contains a tuple of
375
+ # all the hidden states from the encoder layers. Then index into
376
+ # the tuple to access the hidden states from the desired layer.
377
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
378
+ # We also need to apply the final LayerNorm here to not mess with the
379
+ # representations. The `last_hidden_states` that we typically use for
380
+ # obtaining the final prompt representations passes through the LayerNorm
381
+ # layer.
382
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
383
+
384
+ # Run prompt language adapter
385
+ if self.language_adapter is not None:
386
+ prompt_embeds = self._adapt_language(prompt_embeds)
387
+
388
+ if self.text_encoder is not None:
389
+ prompt_embeds_dtype = self.text_encoder.dtype
390
+ elif self.unet is not None:
391
+ prompt_embeds_dtype = self.unet.dtype
392
+ else:
393
+ prompt_embeds_dtype = prompt_embeds.dtype
394
+
395
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
396
+
397
+ bs_embed, seq_len, _ = prompt_embeds.shape
398
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
399
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
400
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
401
+
402
+ # get unconditional embeddings for classifier free guidance
403
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
404
+ uncond_tokens: List[str]
405
+ if negative_prompt is None:
406
+ uncond_tokens = [""] * batch_size
407
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
408
+ raise TypeError(
409
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
410
+ f" {type(prompt)}."
411
+ )
412
+ elif isinstance(negative_prompt, str):
413
+ uncond_tokens = [negative_prompt]
414
+ elif batch_size != len(negative_prompt):
415
+ raise ValueError(
416
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
417
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
418
+ " the batch size of `prompt`."
419
+ )
420
+ else:
421
+ uncond_tokens = negative_prompt
422
+
423
+ max_length = prompt_embeds.shape[1]
424
+ uncond_input = self.tokenizer(
425
+ uncond_tokens,
426
+ padding="max_length",
427
+ max_length=max_length,
428
+ truncation=True,
429
+ return_tensors="pt",
430
+ )
431
+
432
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
433
+ attention_mask = uncond_input.attention_mask.to(device)
434
+ else:
435
+ attention_mask = None
436
+
437
+ negative_prompt_embeds = self.text_encoder(
438
+ uncond_input.input_ids.to(device),
439
+ attention_mask=attention_mask,
440
+ )
441
+ negative_prompt_embeds = negative_prompt_embeds[0]
442
+ # Run negative prompt language adapter
443
+ if self.language_adapter is not None:
444
+ negative_prompt_embeds = self._adapt_language(negative_prompt_embeds)
445
+
446
+ if do_classifier_free_guidance:
447
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
448
+ seq_len = negative_prompt_embeds.shape[1]
449
+
450
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
451
+
452
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
453
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
454
+
455
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
456
+ # Retrieve the original scale by scaling back the LoRA layers
457
+ unscale_lora_layers(self.text_encoder, lora_scale)
458
+
459
+ return prompt_embeds, negative_prompt_embeds
460
+
461
+ def run_safety_checker(self, image, device, dtype):
462
+ if self.safety_checker is None:
463
+ has_nsfw_concept = None
464
+ else:
465
+ if torch.is_tensor(image):
466
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
467
+ else:
468
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
469
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
470
+ image, has_nsfw_concept = self.safety_checker(
471
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
472
+ )
473
+ return image, has_nsfw_concept
474
+
475
+ def prepare_extra_step_kwargs(self, generator, eta):
476
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
477
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
478
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
479
+ # and should be between [0, 1]
480
+
481
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
482
+ extra_step_kwargs = {}
483
+ if accepts_eta:
484
+ extra_step_kwargs["eta"] = eta
485
+
486
+ # check if the scheduler accepts generator
487
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
488
+ if accepts_generator:
489
+ extra_step_kwargs["generator"] = generator
490
+ return extra_step_kwargs
491
+
492
+ def check_inputs(
493
+ self,
494
+ prompt,
495
+ height,
496
+ width,
497
+ negative_prompt=None,
498
+ prompt_embeds=None,
499
+ negative_prompt_embeds=None,
500
+ ):
501
+ if height % 8 != 0 or width % 8 != 0:
502
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
503
+
504
+ if prompt is not None and prompt_embeds is not None:
505
+ raise ValueError(
506
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
507
+ " only forward one of the two."
508
+ )
509
+ elif prompt is None and prompt_embeds is None:
510
+ raise ValueError(
511
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
512
+ )
513
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
514
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
515
+
516
+ if negative_prompt is not None and negative_prompt_embeds is not None:
517
+ raise ValueError(
518
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
519
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
520
+ )
521
+
522
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
523
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
524
+ raise ValueError(
525
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
526
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
527
+ f" {negative_prompt_embeds.shape}."
528
+ )
529
+
530
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
531
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
532
+ if isinstance(generator, list) and len(generator) != batch_size:
533
+ raise ValueError(
534
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
535
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
536
+ )
537
+
538
+ if latents is None:
539
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
540
+ else:
541
+ latents = latents.to(device)
542
+
543
+ # scale the initial noise by the standard deviation required by the scheduler
544
+ latents = latents * self.scheduler.init_noise_sigma
545
+ return latents
546
+
547
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
548
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
549
+
550
+ The suffixes after the scaling factors represent the stages where they are being applied.
551
+
552
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
553
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
554
+
555
+ Args:
556
+ s1 (`float`):
557
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
558
+ mitigate "oversmoothing effect" in the enhanced denoising process.
559
+ s2 (`float`):
560
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
561
+ mitigate "oversmoothing effect" in the enhanced denoising process.
562
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
563
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
564
+ """
565
+ if not hasattr(self, "unet"):
566
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
567
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
568
+
569
+ def disable_freeu(self):
570
+ """Disables the FreeU mechanism if enabled."""
571
+ self.unet.disable_freeu()
572
+
573
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
574
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
575
+ """
576
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
577
+
578
+ Args:
579
+ timesteps (`torch.Tensor`):
580
+ generate embedding vectors at these timesteps
581
+ embedding_dim (`int`, *optional*, defaults to 512):
582
+ dimension of the embeddings to generate
583
+ dtype:
584
+ data type of the generated embeddings
585
+
586
+ Returns:
587
+ `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
588
+ """
589
+ assert len(w.shape) == 1
590
+ w = w * 1000.0
591
+
592
+ half_dim = embedding_dim // 2
593
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
594
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
595
+ emb = w.to(dtype)[:, None] * emb[None, :]
596
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
597
+ if embedding_dim % 2 == 1: # zero pad
598
+ emb = torch.nn.functional.pad(emb, (0, 1))
599
+ assert emb.shape == (w.shape[0], embedding_dim)
600
+ return emb
601
+
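# Sketch (assuming `pipe` is an instance of the pipeline above): the guidance embedding is only
# used when the UNet exposes `time_cond_proj_dim`, as in guidance-distilled (LCM-style) UNets.
w = torch.tensor([6.5])                                          # guidance_scale 7.5 -> w = 6.5
cond = pipe.get_guidance_scale_embedding(w, embedding_dim=256)   # tensor of shape (1, 256)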
602
+ @property
603
+ def guidance_scale(self):
604
+ return self._guidance_scale
605
+
606
+ @property
607
+ def guidance_rescale(self):
608
+ return self._guidance_rescale
609
+
610
+ @property
611
+ def clip_skip(self):
612
+ return self._clip_skip
613
+
614
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
615
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
616
+ # corresponds to doing no classifier free guidance.
617
+ @property
618
+ def do_classifier_free_guidance(self):
619
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
620
+
621
+ @property
622
+ def cross_attention_kwargs(self):
623
+ return self._cross_attention_kwargs
624
+
625
+ @property
626
+ def num_timesteps(self):
627
+ return self._num_timesteps
628
+
629
+ @property
630
+ def interrupt(self):
631
+ return self._interrupt
632
+
633
+ @torch.no_grad()
634
+ def __call__(
635
+ self,
636
+ prompt: Union[str, List[str]] = None,
637
+ height: Optional[int] = None,
638
+ width: Optional[int] = None,
639
+ num_inference_steps: int = 50,
640
+ timesteps: List[int] = None,
641
+ guidance_scale: float = 7.5,
642
+ negative_prompt: Optional[Union[str, List[str]]] = None,
643
+ num_images_per_prompt: Optional[int] = 1,
644
+ eta: float = 0.0,
645
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
646
+ latents: Optional[torch.FloatTensor] = None,
647
+ prompt_embeds: Optional[torch.FloatTensor] = None,
648
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
649
+ output_type: Optional[str] = "pil",
650
+ return_dict: bool = True,
651
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
652
+ guidance_rescale: float = 0.0,
653
+ clip_skip: Optional[int] = None,
654
+ **kwargs,
655
+ ):
656
+ r"""
657
+ The call function to the pipeline for generation.
658
+
659
+ Args:
660
+ prompt (`str` or `List[str]`, *optional*):
661
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
662
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
663
+ The height in pixels of the generated image.
664
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
665
+ The width in pixels of the generated image.
666
+ num_inference_steps (`int`, *optional*, defaults to 50):
667
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
668
+ expense of slower inference.
669
+ timesteps (`List[int]`, *optional*):
670
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
671
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
672
+ passed will be used. Must be in descending order.
673
+ guidance_scale (`float`, *optional*, defaults to 7.5):
674
+ A higher guidance scale value encourages the model to generate images closely linked to the text
675
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
676
+ negative_prompt (`str` or `List[str]`, *optional*):
677
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
678
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
679
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
680
+ The number of images to generate per prompt.
681
+ eta (`float`, *optional*, defaults to 0.0):
682
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
683
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
684
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
685
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
686
+ generation deterministic.
687
+ latents (`torch.FloatTensor`, *optional*):
688
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
689
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
690
+ tensor is generated by sampling using the supplied random `generator`.
691
+ prompt_embeds (`torch.FloatTensor`, *optional*):
692
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
693
+ provided, text embeddings are generated from the `prompt` input argument.
694
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
695
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
696
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
697
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
698
+ output_type (`str`, *optional*, defaults to `"pil"`):
699
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
700
+ return_dict (`bool`, *optional*, defaults to `True`):
701
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
702
+ plain tuple.
703
+ cross_attention_kwargs (`dict`, *optional*):
704
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
705
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
706
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
707
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
708
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
709
+ using zero terminal SNR.
710
+ clip_skip (`int`, *optional*):
711
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
712
+ the output of the pre-final layer will be used for computing the prompt embeddings.
713
+
714
+ Examples:
715
+
716
+ Returns:
717
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
718
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
719
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
720
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
721
+ "not-safe-for-work" (nsfw) content.
722
+ """
723
+
724
+ # 0. Default height and width to unet
725
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
726
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
727
+ # to deal with lora scaling and other possible forward hooks
728
+
729
+ # 1. Check inputs. Raise error if not correct
730
+ self.check_inputs(
731
+ prompt,
732
+ height,
733
+ width,
734
+ negative_prompt,
735
+ prompt_embeds,
736
+ negative_prompt_embeds,
737
+ )
738
+
739
+ self._guidance_scale = guidance_scale
740
+ self._guidance_rescale = guidance_rescale
741
+ self._clip_skip = clip_skip
742
+ self._cross_attention_kwargs = cross_attention_kwargs
743
+ self._interrupt = False
744
+
745
+ # 2. Define call parameters
746
+ if prompt is not None and isinstance(prompt, str):
747
+ batch_size = 1
748
+ elif prompt is not None and isinstance(prompt, list):
749
+ batch_size = len(prompt)
750
+ else:
751
+ batch_size = prompt_embeds.shape[0]
752
+
753
+ device = self._execution_device
754
+
755
+ # 3. Encode input prompt
756
+ lora_scale = (
757
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
758
+ )
759
+
760
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
761
+ prompt,
762
+ device,
763
+ num_images_per_prompt,
764
+ self.do_classifier_free_guidance,
765
+ negative_prompt,
766
+ prompt_embeds=prompt_embeds,
767
+ negative_prompt_embeds=negative_prompt_embeds,
768
+ lora_scale=lora_scale,
769
+ clip_skip=self.clip_skip,
770
+ )
771
+
772
+ # For classifier free guidance, we need to do two forward passes.
773
+ # Here we concatenate the unconditional and text embeddings into a single batch
774
+ # to avoid doing two forward passes
775
+ if self.do_classifier_free_guidance:
776
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
777
+
778
+ # 4. Prepare timesteps
779
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
780
+
781
+ # 5. Prepare latent variables
782
+ num_channels_latents = self.unet.config.in_channels
783
+ latents = self.prepare_latents(
784
+ batch_size * num_images_per_prompt,
785
+ num_channels_latents,
786
+ height,
787
+ width,
788
+ prompt_embeds.dtype,
789
+ device,
790
+ generator,
791
+ latents,
792
+ )
793
+
794
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
795
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
796
+
797
+ # 6.2 Optionally get Guidance Scale Embedding
798
+ timestep_cond = None
799
+ if self.unet.config.time_cond_proj_dim is not None:
800
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
801
+ timestep_cond = self.get_guidance_scale_embedding(
802
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
803
+ ).to(device=device, dtype=latents.dtype)
804
+
805
+ # 7. Denoising loop
806
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
807
+ self._num_timesteps = len(timesteps)
808
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
809
+ for i, t in enumerate(timesteps):
810
+ if self.interrupt:
811
+ continue
812
+
813
+ # expand the latents if we are doing classifier free guidance
814
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
815
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
816
+
817
+ # predict the noise residual
818
+ noise_pred = self.unet(
819
+ latent_model_input,
820
+ t,
821
+ encoder_hidden_states=prompt_embeds,
822
+ timestep_cond=timestep_cond,
823
+ cross_attention_kwargs=self.cross_attention_kwargs,
824
+ return_dict=False,
825
+ )[0]
826
+
827
+ # perform guidance
828
+ if self.do_classifier_free_guidance:
829
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
830
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
831
+
832
+ if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
833
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
834
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
835
+
836
+ # compute the previous noisy sample x_t -> x_t-1
837
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
838
+
839
+ # call the callback, if provided
840
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
841
+ progress_bar.update()
842
+
843
+ if not output_type == "latent":
844
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
845
+ 0
846
+ ]
847
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
848
+ else:
849
+ image = latents
850
+ has_nsfw_concept = None
851
+
852
+ if has_nsfw_concept is None:
853
+ do_denormalize = [True] * image.shape[0]
854
+ else:
855
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
856
+
857
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
858
+
859
+ # Offload all models
860
+ self.maybe_free_model_hooks()
861
+
862
+ if not return_dict:
863
+ return (image, has_nsfw_concept)
864
+
865
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
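
The denoising loop above applies classifier-free guidance and, when `guidance_rescale > 0`, the rescaling from section 3.4 of https://arxiv.org/pdf/2305.08891.pdf. A minimal standalone sketch of that arithmetic (the `rescale_noise_cfg` body below is an illustrative re-implementation, and the tensors are random stand-ins for the two halves of the batched UNet output):

import torch

def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # Match the per-sample std of the guided prediction to the text branch,
    # then blend with the unrescaled prediction to limit over-exposure.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg

noise_pred_uncond = torch.randn(2, 4, 64, 64)
noise_pred_text = torch.randn(2, 4, 64, 64)
guidance_scale, guidance_rescale = 7.5, 0.7

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)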
v0.26.3/iadb.py ADDED
@@ -0,0 +1,149 @@
1
+ from typing import List, Optional, Tuple, Union
2
+
3
+ import torch
4
+
5
+ from diffusers import DiffusionPipeline
6
+ from diffusers.configuration_utils import ConfigMixin
7
+ from diffusers.pipelines.pipeline_utils import ImagePipelineOutput
8
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
9
+
10
+
11
+ class IADBScheduler(SchedulerMixin, ConfigMixin):
12
+ """
13
+ IADBScheduler is a scheduler for the Iterative α-(de)Blending denoising method. It is simple and minimalist.
14
+
15
+ For more details, see the original paper: https://arxiv.org/abs/2305.03486 and the blog post: https://ggx-research.github.io/publication/2023/05/10/publication-iadb.html
16
+ """
17
+
18
+ def step(
19
+ self,
20
+ model_output: torch.FloatTensor,
21
+ timestep: int,
22
+ x_alpha: torch.FloatTensor,
23
+ ) -> torch.FloatTensor:
24
+ """
25
+ Predict the sample at the previous timestep by reversing the ODE. Core function to propagate the diffusion
26
+ process from the learned model outputs (most often the predicted noise).
27
+
28
+ Args:
29
+ model_output (`torch.FloatTensor`): direct output from learned diffusion model. It is the direction from x0 to x1.
30
+ timestep (`float`): current timestep in the diffusion chain.
31
+ x_alpha (`torch.FloatTensor`): x_alpha sample for the current timestep
32
+
33
+ Returns:
34
+ `torch.FloatTensor`: the sample at the previous timestep
35
+
36
+ """
37
+ if self.num_inference_steps is None:
38
+ raise ValueError(
39
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
40
+ )
41
+
42
+ alpha = timestep / self.num_inference_steps
43
+ alpha_next = (timestep + 1) / self.num_inference_steps
44
+
45
+ d = model_output
46
+
47
+ x_alpha = x_alpha + (alpha_next - alpha) * d
48
+
49
+ return x_alpha
50
+
51
+ def set_timesteps(self, num_inference_steps: int):
52
+ self.num_inference_steps = num_inference_steps
53
+
54
+ def add_noise(
55
+ self,
56
+ original_samples: torch.FloatTensor,
57
+ noise: torch.FloatTensor,
58
+ alpha: torch.FloatTensor,
59
+ ) -> torch.FloatTensor:
60
+ return original_samples * alpha + noise * (1 - alpha)
61
+
62
+ def __len__(self):
63
+ return self.config.num_train_timesteps
64
+
65
+
66
+ class IADBPipeline(DiffusionPipeline):
67
+ r"""
68
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
69
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
70
+
71
+ Parameters:
72
+ unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
73
+ scheduler ([`SchedulerMixin`]):
74
+ A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
75
+ [`DDPMScheduler`], or [`DDIMScheduler`].
76
+ """
77
+
78
+ def __init__(self, unet, scheduler):
79
+ super().__init__()
80
+
81
+ self.register_modules(unet=unet, scheduler=scheduler)
82
+
83
+ @torch.no_grad()
84
+ def __call__(
85
+ self,
86
+ batch_size: int = 1,
87
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
88
+ num_inference_steps: int = 50,
89
+ output_type: Optional[str] = "pil",
90
+ return_dict: bool = True,
91
+ ) -> Union[ImagePipelineOutput, Tuple]:
92
+ r"""
93
+ Args:
94
+ batch_size (`int`, *optional*, defaults to 1):
95
+ The number of images to generate.
96
+ num_inference_steps (`int`, *optional*, defaults to 50):
97
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
98
+ expense of slower inference.
99
+ output_type (`str`, *optional*, defaults to `"pil"`):
100
+ The output format of the generated image. Choose between
101
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
102
+ return_dict (`bool`, *optional*, defaults to `True`):
103
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
104
+
105
+ Returns:
106
+ [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
107
+ True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
108
+ """
109
+
110
+ # Sample gaussian noise to begin loop
111
+ if isinstance(self.unet.config.sample_size, int):
112
+ image_shape = (
113
+ batch_size,
114
+ self.unet.config.in_channels,
115
+ self.unet.config.sample_size,
116
+ self.unet.config.sample_size,
117
+ )
118
+ else:
119
+ image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
120
+
121
+ if isinstance(generator, list) and len(generator) != batch_size:
122
+ raise ValueError(
123
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
124
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
125
+ )
126
+
127
+ image = torch.randn(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
128
+
129
+ # set step values
130
+ self.scheduler.set_timesteps(num_inference_steps)
131
+ x_alpha = image.clone()
132
+ for t in self.progress_bar(range(num_inference_steps)):
133
+ alpha = t / num_inference_steps
134
+
135
+ # 1. predict noise model_output
136
+ model_output = self.unet(x_alpha, torch.tensor(alpha, device=x_alpha.device)).sample
137
+
138
+ # 2. step
139
+ x_alpha = self.scheduler.step(model_output, t, x_alpha)
140
+
141
+ image = (x_alpha * 0.5 + 0.5).clamp(0, 1)
142
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
143
+ if output_type == "pil":
144
+ image = self.numpy_to_pil(image)
145
+
146
+ if not return_dict:
147
+ return (image,)
148
+
149
+ return ImagePipelineOutput(images=image)
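
A short sketch of how the two IADB pieces above fit together; random tensors stand in for real data and for a trained UNet's prediction, so this only illustrates the α-blending arithmetic, not a full sampling run:

import torch

scheduler = IADBScheduler()
scheduler.set_timesteps(128)

x0 = torch.randn(1, 3, 32, 32)            # stand-in for a clean sample
noise = torch.randn_like(x0)

# x_alpha = alpha * x0 + (1 - alpha) * noise: alpha=0 is pure noise, alpha=1 the clean sample.
x_alpha = scheduler.add_noise(x0, noise, torch.tensor(0.25))

# One explicit Euler update of x_alpha along the (here random) predicted blending direction.
model_output = torch.randn_like(x_alpha)  # would come from the UNet during sampling
x_alpha = scheduler.step(model_output, timestep=32, x_alpha=x_alpha)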
v0.26.3/imagic_stable_diffusion.py ADDED
@@ -0,0 +1,496 @@
1
+ """
2
+ modeled after the textual_inversion.py / train_dreambooth.py and the work
3
+ of justinpinkney here: https://github.com/justinpinkney/stable-diffusion/blob/main/notebooks/imagic.ipynb
4
+ """
5
+ import inspect
6
+ import warnings
7
+ from typing import List, Optional, Union
8
+
9
+ import numpy as np
10
+ import PIL.Image
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from accelerate import Accelerator
14
+
15
+ # TODO: remove and import from diffusers.utils when the new version of diffusers is released
16
+ from packaging import version
17
+ from tqdm.auto import tqdm
18
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
19
+
20
+ from diffusers import DiffusionPipeline
21
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
22
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
23
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
24
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
25
+ from diffusers.utils import logging
26
+
27
+
28
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
29
+ PIL_INTERPOLATION = {
30
+ "linear": PIL.Image.Resampling.BILINEAR,
31
+ "bilinear": PIL.Image.Resampling.BILINEAR,
32
+ "bicubic": PIL.Image.Resampling.BICUBIC,
33
+ "lanczos": PIL.Image.Resampling.LANCZOS,
34
+ "nearest": PIL.Image.Resampling.NEAREST,
35
+ }
36
+ else:
37
+ PIL_INTERPOLATION = {
38
+ "linear": PIL.Image.LINEAR,
39
+ "bilinear": PIL.Image.BILINEAR,
40
+ "bicubic": PIL.Image.BICUBIC,
41
+ "lanczos": PIL.Image.LANCZOS,
42
+ "nearest": PIL.Image.NEAREST,
43
+ }
44
+ # ------------------------------------------------------------------------------
45
+
46
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
47
+
48
+
49
+ def preprocess(image):
50
+ w, h = image.size
51
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
52
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
53
+ image = np.array(image).astype(np.float32) / 255.0
54
+ image = image[None].transpose(0, 3, 1, 2)
55
+ image = torch.from_numpy(image)
56
+ return 2.0 * image - 1.0
57
+
58
+
59
+ class ImagicStableDiffusionPipeline(DiffusionPipeline):
60
+ r"""
61
+ Pipeline for imagic image editing.
62
+ See paper here: https://arxiv.org/pdf/2210.09276.pdf
63
+
64
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
65
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
66
+ Args:
67
+ vae ([`AutoencoderKL`]):
68
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
69
+ text_encoder ([`CLIPTextModel`]):
70
+ Frozen text-encoder. Stable Diffusion uses the text portion of
71
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
72
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
73
+ tokenizer (`CLIPTokenizer`):
74
+ Tokenizer of class
75
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
76
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
77
+ scheduler ([`SchedulerMixin`]):
78
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
79
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
80
+ safety_checker ([`StableDiffusionSafetyChecker`]):
81
+ Classification module that estimates whether generated images could be considered offensive or harmful.
82
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
83
+ feature_extractor ([`CLIPImageProcessor`]):
84
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
85
+ """
86
+
87
+ def __init__(
88
+ self,
89
+ vae: AutoencoderKL,
90
+ text_encoder: CLIPTextModel,
91
+ tokenizer: CLIPTokenizer,
92
+ unet: UNet2DConditionModel,
93
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
94
+ safety_checker: StableDiffusionSafetyChecker,
95
+ feature_extractor: CLIPImageProcessor,
96
+ ):
97
+ super().__init__()
98
+ self.register_modules(
99
+ vae=vae,
100
+ text_encoder=text_encoder,
101
+ tokenizer=tokenizer,
102
+ unet=unet,
103
+ scheduler=scheduler,
104
+ safety_checker=safety_checker,
105
+ feature_extractor=feature_extractor,
106
+ )
107
+
108
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
109
+ r"""
110
+ Enable sliced attention computation.
111
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
112
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
113
+ Args:
114
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
115
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
116
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
117
+ `attention_head_dim` must be a multiple of `slice_size`.
118
+ """
119
+ if slice_size == "auto":
120
+ # half the attention head size is usually a good trade-off between
121
+ # speed and memory
122
+ slice_size = self.unet.config.attention_head_dim // 2
123
+ self.unet.set_attention_slice(slice_size)
124
+
125
+ def disable_attention_slicing(self):
126
+ r"""
127
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
128
+ back to computing attention in one step.
129
+ """
130
+ # set slice_size = `None` to disable `attention slicing`
131
+ self.enable_attention_slicing(None)
132
+
133
+ def train(
134
+ self,
135
+ prompt: Union[str, List[str]],
136
+ image: Union[torch.FloatTensor, PIL.Image.Image],
137
+ height: Optional[int] = 512,
138
+ width: Optional[int] = 512,
139
+ generator: Optional[torch.Generator] = None,
140
+ embedding_learning_rate: float = 0.001,
141
+ diffusion_model_learning_rate: float = 2e-6,
142
+ text_embedding_optimization_steps: int = 500,
143
+ model_fine_tuning_optimization_steps: int = 1000,
144
+ **kwargs,
145
+ ):
146
+ r"""
147
+ Function invoked when calling the pipeline for generation.
148
+ Args:
149
+ prompt (`str` or `List[str]`):
150
+ The prompt or prompts to guide the image generation.
151
+ height (`int`, *optional*, defaults to 512):
152
+ The height in pixels of the generated image.
153
+ width (`int`, *optional*, defaults to 512):
154
+ The width in pixels of the generated image.
155
+ num_inference_steps (`int`, *optional*, defaults to 50):
156
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
157
+ expense of slower inference.
158
+ guidance_scale (`float`, *optional*, defaults to 7.5):
159
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
160
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
161
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
162
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
163
+ usually at the expense of lower image quality.
164
+ eta (`float`, *optional*, defaults to 0.0):
165
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
166
+ [`schedulers.DDIMScheduler`], will be ignored for others.
167
+ generator (`torch.Generator`, *optional*):
168
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
169
+ deterministic.
170
+ latents (`torch.FloatTensor`, *optional*):
171
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
172
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
173
+ tensor will be generated by sampling using the supplied random `generator`.
174
+ output_type (`str`, *optional*, defaults to `"pil"`):
175
+ The output format of the generated image. Choose between
176
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
177
+ return_dict (`bool`, *optional*, defaults to `True`):
178
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
179
+ plain tuple.
180
+ Returns:
181
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
182
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
183
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
184
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
185
+ (nsfw) content, according to the `safety_checker`.
186
+ """
187
+ accelerator = Accelerator(
188
+ gradient_accumulation_steps=1,
189
+ mixed_precision="fp16",
190
+ )
191
+
192
+ if "torch_device" in kwargs:
193
+ device = kwargs.pop("torch_device")
194
+ warnings.warn(
195
+ "`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0."
196
+ " Consider using `pipe.to(torch_device)` instead."
197
+ )
198
+
199
+ if device is None:
200
+ device = "cuda" if torch.cuda.is_available() else "cpu"
201
+ self.to(device)
202
+
203
+ if height % 8 != 0 or width % 8 != 0:
204
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
205
+
206
+ # Freeze vae and unet
207
+ self.vae.requires_grad_(False)
208
+ self.unet.requires_grad_(False)
209
+ self.text_encoder.requires_grad_(False)
210
+ self.unet.eval()
211
+ self.vae.eval()
212
+ self.text_encoder.eval()
213
+
214
+ if accelerator.is_main_process:
215
+ accelerator.init_trackers(
216
+ "imagic",
217
+ config={
218
+ "embedding_learning_rate": embedding_learning_rate,
219
+ "text_embedding_optimization_steps": text_embedding_optimization_steps,
220
+ },
221
+ )
222
+
223
+ # get text embeddings for prompt
224
+ text_input = self.tokenizer(
225
+ prompt,
226
+ padding="max_length",
227
+ max_length=self.tokenizer.model_max_length,
228
+ truncation=True,
229
+ return_tensors="pt",
230
+ )
231
+ text_embeddings = torch.nn.Parameter(
232
+ self.text_encoder(text_input.input_ids.to(self.device))[0], requires_grad=True
233
+ )
234
+ text_embeddings = text_embeddings.detach()
235
+ text_embeddings.requires_grad_()
236
+ text_embeddings_orig = text_embeddings.clone()
237
+
238
+ # Initialize the optimizer
239
+ optimizer = torch.optim.Adam(
240
+ [text_embeddings], # only optimize the embeddings
241
+ lr=embedding_learning_rate,
242
+ )
243
+
244
+ if isinstance(image, PIL.Image.Image):
245
+ image = preprocess(image)
246
+
247
+ latents_dtype = text_embeddings.dtype
248
+ image = image.to(device=self.device, dtype=latents_dtype)
249
+ init_latent_image_dist = self.vae.encode(image).latent_dist
250
+ image_latents = init_latent_image_dist.sample(generator=generator)
251
+ image_latents = 0.18215 * image_latents
252
+
253
+ progress_bar = tqdm(range(text_embedding_optimization_steps), disable=not accelerator.is_local_main_process)
254
+ progress_bar.set_description("Steps")
255
+
256
+ global_step = 0
257
+
258
+ logger.info("First optimizing the text embedding to better reconstruct the init image")
259
+ for _ in range(text_embedding_optimization_steps):
260
+ with accelerator.accumulate(text_embeddings):
261
+ # Sample noise that we'll add to the latents
262
+ noise = torch.randn(image_latents.shape).to(image_latents.device)
263
+ timesteps = torch.randint(1000, (1,), device=image_latents.device)
264
+
265
+ # Add noise to the latents according to the noise magnitude at each timestep
266
+ # (this is the forward diffusion process)
267
+ noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
268
+
269
+ # Predict the noise residual
270
+ noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
271
+
272
+ loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
273
+ accelerator.backward(loss)
274
+
275
+ optimizer.step()
276
+ optimizer.zero_grad()
277
+
278
+ # Checks if the accelerator has performed an optimization step behind the scenes
279
+ if accelerator.sync_gradients:
280
+ progress_bar.update(1)
281
+ global_step += 1
282
+
283
+ logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
284
+ progress_bar.set_postfix(**logs)
285
+ accelerator.log(logs, step=global_step)
286
+
287
+ accelerator.wait_for_everyone()
288
+
289
+ text_embeddings.requires_grad_(False)
290
+
291
+ # Now we fine tune the unet to better reconstruct the image
292
+ self.unet.requires_grad_(True)
293
+ self.unet.train()
294
+ optimizer = torch.optim.Adam(
295
+ self.unet.parameters(), # only optimize unet
296
+ lr=diffusion_model_learning_rate,
297
+ )
298
+ progress_bar = tqdm(range(model_fine_tuning_optimization_steps), disable=not accelerator.is_local_main_process)
299
+
300
+ logger.info("Next fine tuning the entire model to better reconstruct the init image")
301
+ for _ in range(model_fine_tuning_optimization_steps):
302
+ with accelerator.accumulate(self.unet.parameters()):
303
+ # Sample noise that we'll add to the latents
304
+ noise = torch.randn(image_latents.shape).to(image_latents.device)
305
+ timesteps = torch.randint(1000, (1,), device=image_latents.device)
306
+
307
+ # Add noise to the latents according to the noise magnitude at each timestep
308
+ # (this is the forward diffusion process)
309
+ noisy_latents = self.scheduler.add_noise(image_latents, noise, timesteps)
310
+
311
+ # Predict the noise residual
312
+ noise_pred = self.unet(noisy_latents, timesteps, text_embeddings).sample
313
+
314
+ loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
315
+ accelerator.backward(loss)
316
+
317
+ optimizer.step()
318
+ optimizer.zero_grad()
319
+
320
+ # Checks if the accelerator has performed an optimization step behind the scenes
321
+ if accelerator.sync_gradients:
322
+ progress_bar.update(1)
323
+ global_step += 1
324
+
325
+ logs = {"loss": loss.detach().item()} # , "lr": lr_scheduler.get_last_lr()[0]}
326
+ progress_bar.set_postfix(**logs)
327
+ accelerator.log(logs, step=global_step)
328
+
329
+ accelerator.wait_for_everyone()
330
+ self.text_embeddings_orig = text_embeddings_orig
331
+ self.text_embeddings = text_embeddings
332
+
333
+ @torch.no_grad()
334
+ def __call__(
335
+ self,
336
+ alpha: float = 1.2,
337
+ height: Optional[int] = 512,
338
+ width: Optional[int] = 512,
339
+ num_inference_steps: Optional[int] = 50,
340
+ generator: Optional[torch.Generator] = None,
341
+ output_type: Optional[str] = "pil",
342
+ return_dict: bool = True,
343
+ guidance_scale: float = 7.5,
344
+ eta: float = 0.0,
345
+ ):
346
+ r"""
347
+ Function invoked when calling the pipeline for generation.
348
+ Args:
349
+ prompt (`str` or `List[str]`):
350
+ The prompt or prompts to guide the image generation.
351
+ height (`int`, *optional*, defaults to 512):
352
+ The height in pixels of the generated image.
353
+ width (`int`, *optional*, defaults to 512):
354
+ The width in pixels of the generated image.
355
+ num_inference_steps (`int`, *optional*, defaults to 50):
356
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
357
+ expense of slower inference.
358
+ guidance_scale (`float`, *optional*, defaults to 7.5):
359
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
360
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
361
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
362
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
363
+ usually at the expense of lower image quality.
364
+ eta (`float`, *optional*, defaults to 0.0):
365
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
366
+ [`schedulers.DDIMScheduler`], will be ignored for others.
367
+ generator (`torch.Generator`, *optional*):
368
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
369
+ deterministic.
370
+ latents (`torch.FloatTensor`, *optional*):
371
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
372
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
373
+ tensor will be generated by sampling using the supplied random `generator`.
374
+ output_type (`str`, *optional*, defaults to `"pil"`):
375
+ The output format of the generated image. Choose between
376
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `nd.array`.
377
+ return_dict (`bool`, *optional*, defaults to `True`):
378
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
379
+ plain tuple.
380
+ Returns:
381
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
382
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
383
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
384
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
385
+ (nsfw) content, according to the `safety_checker`.
386
+ """
387
+ if height % 8 != 0 or width % 8 != 0:
388
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
389
+ if self.text_embeddings is None:
390
+ raise ValueError("Please run the pipe.train() before trying to generate an image.")
391
+ if self.text_embeddings_orig is None:
392
+ raise ValueError("Please run the pipe.train() before trying to generate an image.")
393
+
394
+ text_embeddings = alpha * self.text_embeddings_orig + (1 - alpha) * self.text_embeddings
395
+
396
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
397
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
398
+ # corresponds to doing no classifier free guidance.
399
+ do_classifier_free_guidance = guidance_scale > 1.0
400
+ # get unconditional embeddings for classifier free guidance
401
+ if do_classifier_free_guidance:
402
+ uncond_tokens = [""]
403
+ max_length = self.tokenizer.model_max_length
404
+ uncond_input = self.tokenizer(
405
+ uncond_tokens,
406
+ padding="max_length",
407
+ max_length=max_length,
408
+ truncation=True,
409
+ return_tensors="pt",
410
+ )
411
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
412
+
413
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
414
+ seq_len = uncond_embeddings.shape[1]
415
+ uncond_embeddings = uncond_embeddings.view(1, seq_len, -1)
416
+
417
+ # For classifier free guidance, we need to do two forward passes.
418
+ # Here we concatenate the unconditional and text embeddings into a single batch
419
+ # to avoid doing two forward passes
420
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
421
+
422
+ # get the initial random noise unless the user supplied it
423
+
424
+ # Unlike in other pipelines, latents need to be generated in the target device
425
+ # for 1-to-1 results reproducibility with the CompVis implementation.
426
+ # However this currently doesn't work in `mps`.
427
+ latents_shape = (1, self.unet.config.in_channels, height // 8, width // 8)
428
+ latents_dtype = text_embeddings.dtype
429
+ if self.device.type == "mps":
430
+ # randn does not exist on mps
431
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
432
+ self.device
433
+ )
434
+ else:
435
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
436
+
437
+ # set timesteps
438
+ self.scheduler.set_timesteps(num_inference_steps)
439
+
440
+ # Some schedulers like PNDM have timesteps as arrays
441
+ # It's more optimized to move all timesteps to correct device beforehand
442
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
443
+
444
+ # scale the initial noise by the standard deviation required by the scheduler
445
+ latents = latents * self.scheduler.init_noise_sigma
446
+
447
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
448
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
449
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
450
+ # and should be between [0, 1]
451
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
452
+ extra_step_kwargs = {}
453
+ if accepts_eta:
454
+ extra_step_kwargs["eta"] = eta
455
+
456
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
457
+ # expand the latents if we are doing classifier free guidance
458
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
459
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
460
+
461
+ # predict the noise residual
462
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
463
+
464
+ # perform guidance
465
+ if do_classifier_free_guidance:
466
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
467
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
468
+
469
+ # compute the previous noisy sample x_t -> x_t-1
470
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
471
+
472
+ latents = 1 / 0.18215 * latents
473
+ image = self.vae.decode(latents).sample
474
+
475
+ image = (image / 2 + 0.5).clamp(0, 1)
476
+
477
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
478
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
479
+
480
+ if self.safety_checker is not None:
481
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
482
+ self.device
483
+ )
484
+ image, has_nsfw_concept = self.safety_checker(
485
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
486
+ )
487
+ else:
488
+ has_nsfw_concept = None
489
+
490
+ if output_type == "pil":
491
+ image = self.numpy_to_pil(image)
492
+
493
+ if not return_dict:
494
+ return (image, has_nsfw_concept)
495
+
496
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
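
A hedged usage sketch for the Imagic pipeline above. It is a two-stage workflow: `train()` first optimizes the text embedding and then fine-tunes the UNet on the input image, after which `__call__()` blends the original and optimized embeddings via `alpha`. The base checkpoint, the `custom_pipeline` string, and the local image path are assumptions, not taken from this diff:

import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="imagic_stable_diffusion",  # assumed to resolve to the community file above
).to("cuda")

init_image = Image.open("dog.png").convert("RGB").resize((512, 512))  # hypothetical local image

# Stage 1: optimize the text embedding, then fine-tune the UNet on the input image.
pipe.train("A photo of a dog jumping", image=init_image)

# Stage 2: generate the edit; `alpha` interpolates/extrapolates between the optimized
# and the original prompt embeddings (see the blend at the start of __call__ above).
result = pipe(alpha=1.2, guidance_scale=7.5, num_inference_steps=50)
result.images[0].save("edited_dog.png")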
v0.26.3/img2img_inpainting.py ADDED
@@ -0,0 +1,464 @@
1
+ import inspect
2
+ from typing import Callable, List, Optional, Tuple, Union
3
+
4
+ import numpy as np
5
+ import PIL.Image
6
+ import torch
7
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
8
+
9
+ from diffusers import DiffusionPipeline
10
+ from diffusers.configuration_utils import FrozenDict
11
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
12
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
15
+ from diffusers.utils import deprecate, logging
16
+
17
+
18
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
19
+
20
+
21
+ def prepare_mask_and_masked_image(image, mask):
22
+ image = np.array(image.convert("RGB"))
23
+ image = image[None].transpose(0, 3, 1, 2)
24
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
25
+
26
+ mask = np.array(mask.convert("L"))
27
+ mask = mask.astype(np.float32) / 255.0
28
+ mask = mask[None, None]
29
+ mask[mask < 0.5] = 0
30
+ mask[mask >= 0.5] = 1
31
+ mask = torch.from_numpy(mask)
32
+
33
+ masked_image = image * (mask < 0.5)
34
+
35
+ return mask, masked_image
36
+
37
+
38
+ def check_size(image, height, width):
39
+ if isinstance(image, PIL.Image.Image):
40
+ w, h = image.size
41
+ elif isinstance(image, torch.Tensor):
42
+ *_, h, w = image.shape
43
+
44
+ if h != height or w != width:
45
+ raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
46
+
47
+
48
+ def overlay_inner_image(image, inner_image, paste_offset: Tuple[int] = (0, 0)):
49
+ inner_image = inner_image.convert("RGBA")
50
+ image = image.convert("RGB")
51
+
52
+ image.paste(inner_image, paste_offset, inner_image)
53
+ image = image.convert("RGB")
54
+
55
+ return image
56
+
57
+
58
+ class ImageToImageInpaintingPipeline(DiffusionPipeline):
59
+ r"""
60
+ Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
61
+
62
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
63
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
64
+
65
+ Args:
66
+ vae ([`AutoencoderKL`]):
67
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
+ text_encoder ([`CLIPTextModel`]):
69
+ Frozen text-encoder. Stable Diffusion uses the text portion of
70
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
+ tokenizer (`CLIPTokenizer`):
73
+ Tokenizer of class
74
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
+ scheduler ([`SchedulerMixin`]):
77
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
78
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
+ safety_checker ([`StableDiffusionSafetyChecker`]):
80
+ Classification module that estimates whether generated images could be considered offensive or harmful.
81
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
+ feature_extractor ([`CLIPImageProcessor`]):
83
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ vae: AutoencoderKL,
89
+ text_encoder: CLIPTextModel,
90
+ tokenizer: CLIPTokenizer,
91
+ unet: UNet2DConditionModel,
92
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
93
+ safety_checker: StableDiffusionSafetyChecker,
94
+ feature_extractor: CLIPImageProcessor,
95
+ ):
96
+ super().__init__()
97
+
98
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
99
+ deprecation_message = (
100
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
101
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
102
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
103
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
104
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
105
+ " file"
106
+ )
107
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
108
+ new_config = dict(scheduler.config)
109
+ new_config["steps_offset"] = 1
110
+ scheduler._internal_dict = FrozenDict(new_config)
111
+
112
+ if safety_checker is None:
113
+ logger.warning(
114
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
115
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
116
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
117
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
118
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
119
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
120
+ )
121
+
122
+ self.register_modules(
123
+ vae=vae,
124
+ text_encoder=text_encoder,
125
+ tokenizer=tokenizer,
126
+ unet=unet,
127
+ scheduler=scheduler,
128
+ safety_checker=safety_checker,
129
+ feature_extractor=feature_extractor,
130
+ )
131
+
132
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
133
+ r"""
134
+ Enable sliced attention computation.
135
+
136
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
137
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
138
+
139
+ Args:
140
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
141
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
142
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
143
+ `attention_head_dim` must be a multiple of `slice_size`.
144
+ """
145
+ if slice_size == "auto":
146
+ # half the attention head size is usually a good trade-off between
147
+ # speed and memory
148
+ slice_size = self.unet.config.attention_head_dim // 2
149
+ self.unet.set_attention_slice(slice_size)
150
+
151
+ def disable_attention_slicing(self):
152
+ r"""
153
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
154
+ back to computing attention in one step.
155
+ """
156
+ # set slice_size = `None` to disable `attention slicing`
157
+ self.enable_attention_slicing(None)
158
+
159
+ @torch.no_grad()
160
+ def __call__(
161
+ self,
162
+ prompt: Union[str, List[str]],
163
+ image: Union[torch.FloatTensor, PIL.Image.Image],
164
+ inner_image: Union[torch.FloatTensor, PIL.Image.Image],
165
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
166
+ height: int = 512,
167
+ width: int = 512,
168
+ num_inference_steps: int = 50,
169
+ guidance_scale: float = 7.5,
170
+ negative_prompt: Optional[Union[str, List[str]]] = None,
171
+ num_images_per_prompt: Optional[int] = 1,
172
+ eta: float = 0.0,
173
+ generator: Optional[torch.Generator] = None,
174
+ latents: Optional[torch.FloatTensor] = None,
175
+ output_type: Optional[str] = "pil",
176
+ return_dict: bool = True,
177
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
178
+ callback_steps: int = 1,
179
+ **kwargs,
180
+ ):
181
+ r"""
182
+ Function invoked when calling the pipeline for generation.
183
+
184
+ Args:
185
+ prompt (`str` or `List[str]`):
186
+ The prompt or prompts to guide the image generation.
187
+ image (`torch.Tensor` or `PIL.Image.Image`):
188
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
189
+ be masked out with `mask_image` and repainted according to `prompt`.
190
+ inner_image (`torch.Tensor` or `PIL.Image.Image`):
191
+ `Image`, or tensor representing an image batch which will be overlayed onto `image`. Non-transparent
192
+ regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
193
+ the last channel representing the alpha channel, which will be used to blend `inner_image` with
194
+ `image`. If not provided, it will be forcibly cast to RGBA.
195
+ mask_image (`PIL.Image.Image`):
196
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
197
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
198
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
199
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
200
+ height (`int`, *optional*, defaults to 512):
201
+ The height in pixels of the generated image.
202
+ width (`int`, *optional*, defaults to 512):
203
+ The width in pixels of the generated image.
204
+ num_inference_steps (`int`, *optional*, defaults to 50):
205
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
206
+ expense of slower inference.
207
+ guidance_scale (`float`, *optional*, defaults to 7.5):
208
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
209
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
210
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
211
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
212
+ usually at the expense of lower image quality.
213
+ negative_prompt (`str` or `List[str]`, *optional*):
214
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
215
+ if `guidance_scale` is less than `1`).
216
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
217
+ The number of images to generate per prompt.
218
+ eta (`float`, *optional*, defaults to 0.0):
219
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
220
+ [`schedulers.DDIMScheduler`], will be ignored for others.
221
+ generator (`torch.Generator`, *optional*):
222
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
223
+ deterministic.
224
+ latents (`torch.FloatTensor`, *optional*):
225
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
226
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
227
+ tensor will ge generated by sampling using the supplied random `generator`.
228
+ output_type (`str`, *optional*, defaults to `"pil"`):
229
+ The output format of the generate image. Choose between
230
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
231
+ return_dict (`bool`, *optional*, defaults to `True`):
232
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
233
+ plain tuple.
234
+ callback (`Callable`, *optional*):
235
+ A function that will be called every `callback_steps` steps during inference. The function will be
236
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
237
+ callback_steps (`int`, *optional*, defaults to 1):
238
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
239
+ called at every step.
240
+
241
+ Returns:
242
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
243
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
244
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
245
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
246
+ (nsfw) content, according to the `safety_checker`.
247
+ """
248
+
249
+ if isinstance(prompt, str):
250
+ batch_size = 1
251
+ elif isinstance(prompt, list):
252
+ batch_size = len(prompt)
253
+ else:
254
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
255
+
256
+ if height % 8 != 0 or width % 8 != 0:
257
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
258
+
259
+ if (callback_steps is None) or (
260
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
261
+ ):
262
+ raise ValueError(
263
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
264
+ f" {type(callback_steps)}."
265
+ )
266
+
267
+ # check if input sizes are correct
268
+ check_size(image, height, width)
269
+ check_size(inner_image, height, width)
270
+ check_size(mask_image, height, width)
271
+
272
+ # get prompt text embeddings
273
+ text_inputs = self.tokenizer(
274
+ prompt,
275
+ padding="max_length",
276
+ max_length=self.tokenizer.model_max_length,
277
+ return_tensors="pt",
278
+ )
279
+ text_input_ids = text_inputs.input_ids
280
+
281
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
282
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
283
+ logger.warning(
284
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
285
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
286
+ )
287
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
288
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
289
+
290
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
291
+ bs_embed, seq_len, _ = text_embeddings.shape
292
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
293
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
294
+
295
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
296
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
297
+ # corresponds to doing no classifier free guidance.
298
+ do_classifier_free_guidance = guidance_scale > 1.0
299
+ # get unconditional embeddings for classifier free guidance
300
+ if do_classifier_free_guidance:
301
+ uncond_tokens: List[str]
302
+ if negative_prompt is None:
303
+ uncond_tokens = [""]
304
+ elif type(prompt) is not type(negative_prompt):
305
+ raise TypeError(
306
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
307
+ f" {type(prompt)}."
308
+ )
309
+ elif isinstance(negative_prompt, str):
310
+ uncond_tokens = [negative_prompt]
311
+ elif batch_size != len(negative_prompt):
312
+ raise ValueError(
313
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
314
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
315
+ " the batch size of `prompt`."
316
+ )
317
+ else:
318
+ uncond_tokens = negative_prompt
319
+
320
+ max_length = text_input_ids.shape[-1]
321
+ uncond_input = self.tokenizer(
322
+ uncond_tokens,
323
+ padding="max_length",
324
+ max_length=max_length,
325
+ truncation=True,
326
+ return_tensors="pt",
327
+ )
328
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
329
+
330
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
331
+ seq_len = uncond_embeddings.shape[1]
332
+ uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
333
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
334
+
335
+ # For classifier free guidance, we need to do two forward passes.
336
+ # Here we concatenate the unconditional and text embeddings into a single batch
337
+ # to avoid doing two forward passes
338
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
339
+
340
+ # get the initial random noise unless the user supplied it
341
+ # Unlike in other pipelines, latents need to be generated in the target device
342
+ # for 1-to-1 results reproducibility with the CompVis implementation.
343
+ # However this currently doesn't work in `mps`.
344
+ num_channels_latents = self.vae.config.latent_channels
345
+ latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
346
+ latents_dtype = text_embeddings.dtype
347
+ if latents is None:
348
+ if self.device.type == "mps":
349
+ # randn does not exist on mps
350
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
351
+ self.device
352
+ )
353
+ else:
354
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
355
+ else:
356
+ if latents.shape != latents_shape:
357
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
358
+ latents = latents.to(self.device)
359
+
360
+ # overlay the inner image
361
+ image = overlay_inner_image(image, inner_image)
362
+
363
+ # prepare mask and masked_image
364
+ mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
365
+ mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
366
+ masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
367
+
368
+ # resize the mask to latents shape as we concatenate the mask to the latents
369
+ mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
370
+
371
+ # encode the mask image into latents space so we can concatenate it to the latents
372
+ masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
373
+ masked_image_latents = 0.18215 * masked_image_latents
374
+
375
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
376
+ mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
377
+ masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
378
+
379
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
380
+ masked_image_latents = (
381
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
382
+ )
383
+
384
+ num_channels_mask = mask.shape[1]
385
+ num_channels_masked_image = masked_image_latents.shape[1]
386
+
387
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
388
+ raise ValueError(
389
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
390
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
391
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
392
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
393
+ " `pipeline.unet` or your `mask_image` or `image` input."
394
+ )
395
+
396
+ # set timesteps
397
+ self.scheduler.set_timesteps(num_inference_steps)
398
+
399
+ # Some schedulers like PNDM have timesteps as arrays
400
+ # It's more optimized to move all timesteps to correct device beforehand
401
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
402
+
403
+ # scale the initial noise by the standard deviation required by the scheduler
404
+ latents = latents * self.scheduler.init_noise_sigma
405
+
406
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
407
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
408
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
409
+ # and should be between [0, 1]
410
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
411
+ extra_step_kwargs = {}
412
+ if accepts_eta:
413
+ extra_step_kwargs["eta"] = eta
414
+
415
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
416
+ # expand the latents if we are doing classifier free guidance
417
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
418
+
419
+ # concat latents, mask, masked_image_latents in the channel dimension
420
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
421
+
422
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
423
+
424
+ # predict the noise residual
425
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
426
+
427
+ # perform guidance
428
+ if do_classifier_free_guidance:
429
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
430
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
431
+
432
+ # compute the previous noisy sample x_t -> x_t-1
433
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
434
+
435
+ # call the callback, if provided
436
+ if callback is not None and i % callback_steps == 0:
437
+ step_idx = i // getattr(self.scheduler, "order", 1)
438
+ callback(step_idx, t, latents)
439
+
440
+ latents = 1 / 0.18215 * latents
441
+ image = self.vae.decode(latents).sample
442
+
443
+ image = (image / 2 + 0.5).clamp(0, 1)
444
+
445
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
446
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
447
+
448
+ if self.safety_checker is not None:
449
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
450
+ self.device
451
+ )
452
+ image, has_nsfw_concept = self.safety_checker(
453
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
454
+ )
455
+ else:
456
+ has_nsfw_concept = None
457
+
458
+ if output_type == "pil":
459
+ image = self.numpy_to_pil(image)
460
+
461
+ if not return_dict:
462
+ return (image, has_nsfw_concept)
463
+
464
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
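A minimal, hypothetical usage sketch for the pipeline above (not part of this commit): community pipeline files like this one can be loaded through `DiffusionPipeline.from_pretrained` with the `custom_pipeline` argument. The base checkpoint id, pipeline name, and image URLs below are illustrative assumptions only.

```python
# Minimal sketch: loading the inpainting-with-overlay pipeline shown above.
# Checkpoint id, pipeline name, and URLs are assumptions, not taken from this diff.
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",       # assumed base Stable Diffusion checkpoint
    custom_pipeline="img2img_inpainting",   # assumed to match this community pipeline file
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image("https://example.com/base.png")                    # placeholder URL
inner_image = load_image("https://example.com/inner.png").convert("RGBA")  # RGBA overlay
mask_image = load_image("https://example.com/mask.png")                    # white = repaint

result = pipe(
    prompt="a fantasy landscape",
    image=init_image,
    inner_image=inner_image,
    mask_image=mask_image,
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
result.save("out.png")
```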
v0.26.3/instaflow_one_step.py ADDED
@@ -0,0 +1,707 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ from packaging import version
20
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
21
+
22
+ from diffusers.configuration_utils import FrozenDict
23
+ from diffusers.image_processor import VaeImageProcessor
24
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
25
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
26
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
27
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
28
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
29
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
30
+ from diffusers.schedulers import KarrasDiffusionSchedulers
31
+ from diffusers.utils import (
32
+ deprecate,
33
+ logging,
34
+ )
35
+ from diffusers.utils.torch_utils import randn_tensor
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+
41
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
42
+ """
43
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
44
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
45
+ """
46
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
47
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
48
+ # rescale the results from guidance (fixes overexposure)
49
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
50
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
51
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
52
+ return noise_cfg
53
+
54
+
55
+ class InstaFlowPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin):
56
+ r"""
57
+ Pipeline for text-to-image generation using Rectified Flow and Euler discretization.
58
+ This customized pipeline is based on StableDiffusionPipeline from the official Diffusers library (0.21.4)
59
+
60
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
61
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
62
+
63
+ The pipeline also inherits the following loading methods:
64
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
65
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
66
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
67
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
68
+
69
+ Args:
70
+ vae ([`AutoencoderKL`]):
71
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
72
+ text_encoder ([`~transformers.CLIPTextModel`]):
73
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
74
+ tokenizer ([`~transformers.CLIPTokenizer`]):
75
+ A `CLIPTokenizer` to tokenize text.
76
+ unet ([`UNet2DConditionModel`]):
77
+ A `UNet2DConditionModel` to denoise the encoded image latents.
78
+ scheduler ([`SchedulerMixin`]):
79
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
80
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
81
+ safety_checker ([`StableDiffusionSafetyChecker`]):
82
+ Classification module that estimates whether generated images could be considered offensive or harmful.
83
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
84
+ about a model's potential harms.
85
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
86
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
87
+ """
88
+
89
+ model_cpu_offload_seq = "text_encoder->unet->vae"
90
+ _optional_components = ["safety_checker", "feature_extractor"]
91
+ _exclude_from_cpu_offload = ["safety_checker"]
92
+
93
+ def __init__(
94
+ self,
95
+ vae: AutoencoderKL,
96
+ text_encoder: CLIPTextModel,
97
+ tokenizer: CLIPTokenizer,
98
+ unet: UNet2DConditionModel,
99
+ scheduler: KarrasDiffusionSchedulers,
100
+ safety_checker: StableDiffusionSafetyChecker,
101
+ feature_extractor: CLIPImageProcessor,
102
+ requires_safety_checker: bool = True,
103
+ ):
104
+ super().__init__()
105
+
106
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
107
+ deprecation_message = (
108
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
109
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
110
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
111
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
112
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
113
+ " file"
114
+ )
115
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
116
+ new_config = dict(scheduler.config)
117
+ new_config["steps_offset"] = 1
118
+ scheduler._internal_dict = FrozenDict(new_config)
119
+
120
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
121
+ deprecation_message = (
122
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
123
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
124
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
125
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
126
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
127
+ )
128
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
129
+ new_config = dict(scheduler.config)
130
+ new_config["clip_sample"] = False
131
+ scheduler._internal_dict = FrozenDict(new_config)
132
+
133
+ if safety_checker is None and requires_safety_checker:
134
+ logger.warning(
135
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
136
+ " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
137
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
138
+ " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
139
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
140
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
141
+ )
142
+
143
+ if safety_checker is not None and feature_extractor is None:
144
+ raise ValueError(
145
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
146
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
147
+ )
148
+
149
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
150
+ version.parse(unet.config._diffusers_version).base_version
151
+ ) < version.parse("0.9.0.dev0")
152
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
153
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
154
+ deprecation_message = (
155
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
156
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
157
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
158
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
159
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
160
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
161
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
162
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
163
+ " the `unet/config.json` file"
164
+ )
165
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
166
+ new_config = dict(unet.config)
167
+ new_config["sample_size"] = 64
168
+ unet._internal_dict = FrozenDict(new_config)
169
+
170
+ self.register_modules(
171
+ vae=vae,
172
+ text_encoder=text_encoder,
173
+ tokenizer=tokenizer,
174
+ unet=unet,
175
+ scheduler=scheduler,
176
+ safety_checker=safety_checker,
177
+ feature_extractor=feature_extractor,
178
+ )
179
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
180
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
181
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
182
+
183
+ def enable_vae_slicing(self):
184
+ r"""
185
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
186
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
187
+ """
188
+ self.vae.enable_slicing()
189
+
190
+ def disable_vae_slicing(self):
191
+ r"""
192
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
193
+ computing decoding in one step.
194
+ """
195
+ self.vae.disable_slicing()
196
+
197
+ def enable_vae_tiling(self):
198
+ r"""
199
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
200
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
201
+ processing larger images.
202
+ """
203
+ self.vae.enable_tiling()
204
+
205
+ def disable_vae_tiling(self):
206
+ r"""
207
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
208
+ computing decoding in one step.
209
+ """
210
+ self.vae.disable_tiling()
211
+
212
+ def _encode_prompt(
213
+ self,
214
+ prompt,
215
+ device,
216
+ num_images_per_prompt,
217
+ do_classifier_free_guidance,
218
+ negative_prompt=None,
219
+ prompt_embeds: Optional[torch.FloatTensor] = None,
220
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
221
+ lora_scale: Optional[float] = None,
222
+ ):
223
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
224
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
225
+
226
+ prompt_embeds_tuple = self.encode_prompt(
227
+ prompt=prompt,
228
+ device=device,
229
+ num_images_per_prompt=num_images_per_prompt,
230
+ do_classifier_free_guidance=do_classifier_free_guidance,
231
+ negative_prompt=negative_prompt,
232
+ prompt_embeds=prompt_embeds,
233
+ negative_prompt_embeds=negative_prompt_embeds,
234
+ lora_scale=lora_scale,
235
+ )
236
+
237
+ # concatenate for backwards comp
238
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
239
+
240
+ return prompt_embeds
241
+
242
+ def encode_prompt(
243
+ self,
244
+ prompt,
245
+ device,
246
+ num_images_per_prompt,
247
+ do_classifier_free_guidance,
248
+ negative_prompt=None,
249
+ prompt_embeds: Optional[torch.FloatTensor] = None,
250
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
251
+ lora_scale: Optional[float] = None,
252
+ ):
253
+ r"""
254
+ Encodes the prompt into text encoder hidden states.
255
+
256
+ Args:
257
+ prompt (`str` or `List[str]`, *optional*):
258
+ prompt to be encoded
259
+ device: (`torch.device`):
260
+ torch device
261
+ num_images_per_prompt (`int`):
262
+ number of images that should be generated per prompt
263
+ do_classifier_free_guidance (`bool`):
264
+ whether to use classifier free guidance or not
265
+ negative_prompt (`str` or `List[str]`, *optional*):
266
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
267
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
268
+ less than `1`).
269
+ prompt_embeds (`torch.FloatTensor`, *optional*):
270
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
271
+ provided, text embeddings will be generated from `prompt` input argument.
272
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
273
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
274
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
275
+ argument.
276
+ lora_scale (`float`, *optional*):
277
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
278
+ """
279
+ # set lora scale so that monkey patched LoRA
280
+ # function of text encoder can correctly access it
281
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
282
+ self._lora_scale = lora_scale
283
+
284
+ # dynamically adjust the LoRA scale
285
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
286
+
287
+ if prompt is not None and isinstance(prompt, str):
288
+ batch_size = 1
289
+ elif prompt is not None and isinstance(prompt, list):
290
+ batch_size = len(prompt)
291
+ else:
292
+ batch_size = prompt_embeds.shape[0]
293
+
294
+ if prompt_embeds is None:
295
+ # textual inversion: process multi-vector tokens if necessary
296
+ if isinstance(self, TextualInversionLoaderMixin):
297
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
298
+
299
+ text_inputs = self.tokenizer(
300
+ prompt,
301
+ padding="max_length",
302
+ max_length=self.tokenizer.model_max_length,
303
+ truncation=True,
304
+ return_tensors="pt",
305
+ )
306
+ text_input_ids = text_inputs.input_ids
307
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
308
+
309
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
310
+ text_input_ids, untruncated_ids
311
+ ):
312
+ removed_text = self.tokenizer.batch_decode(
313
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
314
+ )
315
+ logger.warning(
316
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
317
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
318
+ )
319
+
320
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
321
+ attention_mask = text_inputs.attention_mask.to(device)
322
+ else:
323
+ attention_mask = None
324
+
325
+ prompt_embeds = self.text_encoder(
326
+ text_input_ids.to(device),
327
+ attention_mask=attention_mask,
328
+ )
329
+ prompt_embeds = prompt_embeds[0]
330
+
331
+ if self.text_encoder is not None:
332
+ prompt_embeds_dtype = self.text_encoder.dtype
333
+ elif self.unet is not None:
334
+ prompt_embeds_dtype = self.unet.dtype
335
+ else:
336
+ prompt_embeds_dtype = prompt_embeds.dtype
337
+
338
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
339
+
340
+ bs_embed, seq_len, _ = prompt_embeds.shape
341
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
342
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
343
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
344
+
345
+ # get unconditional embeddings for classifier free guidance
346
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
347
+ uncond_tokens: List[str]
348
+ if negative_prompt is None:
349
+ uncond_tokens = [""] * batch_size
350
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
351
+ raise TypeError(
352
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
353
+ f" {type(prompt)}."
354
+ )
355
+ elif isinstance(negative_prompt, str):
356
+ uncond_tokens = [negative_prompt]
357
+ elif batch_size != len(negative_prompt):
358
+ raise ValueError(
359
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
360
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
361
+ " the batch size of `prompt`."
362
+ )
363
+ else:
364
+ uncond_tokens = negative_prompt
365
+
366
+ # textual inversion: process multi-vector tokens if necessary
367
+ if isinstance(self, TextualInversionLoaderMixin):
368
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
369
+
370
+ max_length = prompt_embeds.shape[1]
371
+ uncond_input = self.tokenizer(
372
+ uncond_tokens,
373
+ padding="max_length",
374
+ max_length=max_length,
375
+ truncation=True,
376
+ return_tensors="pt",
377
+ )
378
+
379
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
380
+ attention_mask = uncond_input.attention_mask.to(device)
381
+ else:
382
+ attention_mask = None
383
+
384
+ negative_prompt_embeds = self.text_encoder(
385
+ uncond_input.input_ids.to(device),
386
+ attention_mask=attention_mask,
387
+ )
388
+ negative_prompt_embeds = negative_prompt_embeds[0]
389
+
390
+ if do_classifier_free_guidance:
391
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
392
+ seq_len = negative_prompt_embeds.shape[1]
393
+
394
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
395
+
396
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
397
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
398
+
399
+ return prompt_embeds, negative_prompt_embeds
400
+
401
+ def run_safety_checker(self, image, device, dtype):
402
+ if self.safety_checker is None:
403
+ has_nsfw_concept = None
404
+ else:
405
+ if torch.is_tensor(image):
406
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
407
+ else:
408
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
409
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
410
+ image, has_nsfw_concept = self.safety_checker(
411
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
412
+ )
413
+ return image, has_nsfw_concept
414
+
415
+ def decode_latents(self, latents):
416
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
417
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
418
+
419
+ latents = 1 / self.vae.config.scaling_factor * latents
420
+ image = self.vae.decode(latents, return_dict=False)[0]
421
+ image = (image / 2 + 0.5).clamp(0, 1)
422
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
423
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
424
+ return image
425
+
426
+ def merge_dW_to_unet(pipe, dW_dict, alpha=1.0):
427
+ _tmp_sd = pipe.unet.state_dict()
428
+ for key in dW_dict.keys():
429
+ _tmp_sd[key] += dW_dict[key] * alpha
430
+ pipe.unet.load_state_dict(_tmp_sd, strict=False)
431
+ return pipe
432
+
433
+ def prepare_extra_step_kwargs(self, generator, eta):
434
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
435
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
436
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
437
+ # and should be between [0, 1]
438
+
439
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
440
+ extra_step_kwargs = {}
441
+ if accepts_eta:
442
+ extra_step_kwargs["eta"] = eta
443
+
444
+ # check if the scheduler accepts generator
445
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
446
+ if accepts_generator:
447
+ extra_step_kwargs["generator"] = generator
448
+ return extra_step_kwargs
449
+
450
+ def check_inputs(
451
+ self,
452
+ prompt,
453
+ height,
454
+ width,
455
+ callback_steps,
456
+ negative_prompt=None,
457
+ prompt_embeds=None,
458
+ negative_prompt_embeds=None,
459
+ ):
460
+ if height % 8 != 0 or width % 8 != 0:
461
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
462
+
463
+ if (callback_steps is None) or (
464
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
465
+ ):
466
+ raise ValueError(
467
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
468
+ f" {type(callback_steps)}."
469
+ )
470
+
471
+ if prompt is not None and prompt_embeds is not None:
472
+ raise ValueError(
473
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
474
+ " only forward one of the two."
475
+ )
476
+ elif prompt is None and prompt_embeds is None:
477
+ raise ValueError(
478
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
479
+ )
480
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
481
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
482
+
483
+ if negative_prompt is not None and negative_prompt_embeds is not None:
484
+ raise ValueError(
485
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
486
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
487
+ )
488
+
489
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
490
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
491
+ raise ValueError(
492
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
493
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
494
+ f" {negative_prompt_embeds.shape}."
495
+ )
496
+
497
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
498
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
499
+ if isinstance(generator, list) and len(generator) != batch_size:
500
+ raise ValueError(
501
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
502
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
503
+ )
504
+
505
+ if latents is None:
506
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
507
+ else:
508
+ latents = latents.to(device)
509
+
510
+ # scale the initial noise by the standard deviation required by the scheduler
511
+ latents = latents * self.scheduler.init_noise_sigma
512
+ return latents
513
+
514
+ @torch.no_grad()
515
+ def __call__(
516
+ self,
517
+ prompt: Union[str, List[str]] = None,
518
+ height: Optional[int] = None,
519
+ width: Optional[int] = None,
520
+ num_inference_steps: int = 50,
521
+ guidance_scale: float = 7.5,
522
+ negative_prompt: Optional[Union[str, List[str]]] = None,
523
+ num_images_per_prompt: Optional[int] = 1,
524
+ eta: float = 0.0,
525
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
526
+ latents: Optional[torch.FloatTensor] = None,
527
+ prompt_embeds: Optional[torch.FloatTensor] = None,
528
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
529
+ output_type: Optional[str] = "pil",
530
+ return_dict: bool = True,
531
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
532
+ callback_steps: int = 1,
533
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
534
+ guidance_rescale: float = 0.0,
535
+ ):
536
+ r"""
537
+ The call function to the pipeline for generation.
538
+
539
+ Args:
540
+ prompt (`str` or `List[str]`, *optional*):
541
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
542
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
543
+ The height in pixels of the generated image.
544
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
545
+ The width in pixels of the generated image.
546
+ num_inference_steps (`int`, *optional*, defaults to 50):
547
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
548
+ expense of slower inference.
549
+ guidance_scale (`float`, *optional*, defaults to 7.5):
550
+ A higher guidance scale value encourages the model to generate images closely linked to the text
551
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
552
+ negative_prompt (`str` or `List[str]`, *optional*):
553
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
554
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
555
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
556
+ The number of images to generate per prompt.
557
+ eta (`float`, *optional*, defaults to 0.0):
558
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
559
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
560
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
561
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
562
+ generation deterministic.
563
+ latents (`torch.FloatTensor`, *optional*):
564
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
565
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
566
+ tensor is generated by sampling using the supplied random `generator`.
567
+ prompt_embeds (`torch.FloatTensor`, *optional*):
568
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
569
+ provided, text embeddings are generated from the `prompt` input argument.
570
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
571
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
572
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
573
+ output_type (`str`, *optional*, defaults to `"pil"`):
574
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
575
+ return_dict (`bool`, *optional*, defaults to `True`):
576
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
577
+ plain tuple.
578
+ callback (`Callable`, *optional*):
579
+ A function that calls every `callback_steps` steps during inference. The function is called with the
580
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
581
+ callback_steps (`int`, *optional*, defaults to 1):
582
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
583
+ every step.
584
+ cross_attention_kwargs (`dict`, *optional*):
585
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
586
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
587
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
588
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
589
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
590
+ using zero terminal SNR.
591
+
592
+ Examples:
593
+
594
+ Returns:
595
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
596
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
597
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
598
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
599
+ "not-safe-for-work" (nsfw) content.
600
+ """
601
+ # 0. Default height and width to unet
602
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
603
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
604
+
605
+ # 1. Check inputs. Raise error if not correct
606
+ self.check_inputs(
607
+ prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
608
+ )
609
+
610
+ # 2. Define call parameters
611
+ if prompt is not None and isinstance(prompt, str):
612
+ batch_size = 1
613
+ elif prompt is not None and isinstance(prompt, list):
614
+ batch_size = len(prompt)
615
+ else:
616
+ batch_size = prompt_embeds.shape[0]
617
+
618
+ device = self._execution_device
619
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
620
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
621
+ # corresponds to doing no classifier free guidance.
622
+ do_classifier_free_guidance = guidance_scale > 1.0
623
+
624
+ # 3. Encode input prompt
625
+ text_encoder_lora_scale = (
626
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
627
+ )
628
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
629
+ prompt,
630
+ device,
631
+ num_images_per_prompt,
632
+ do_classifier_free_guidance,
633
+ negative_prompt,
634
+ prompt_embeds=prompt_embeds,
635
+ negative_prompt_embeds=negative_prompt_embeds,
636
+ lora_scale=text_encoder_lora_scale,
637
+ )
638
+ # For classifier free guidance, we need to do two forward passes.
639
+ # Here we concatenate the unconditional and text embeddings into a single batch
640
+ # to avoid doing two forward passes
641
+ if do_classifier_free_guidance:
642
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
643
+
644
+ # 4. Prepare timesteps
645
+ timesteps = [(1.0 - i / num_inference_steps) * 1000.0 for i in range(num_inference_steps)]
646
+
647
+ # 5. Prepare latent variables
648
+ num_channels_latents = self.unet.config.in_channels
649
+ latents = self.prepare_latents(
650
+ batch_size * num_images_per_prompt,
651
+ num_channels_latents,
652
+ height,
653
+ width,
654
+ prompt_embeds.dtype,
655
+ device,
656
+ generator,
657
+ latents,
658
+ )
659
+
660
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
661
+ dt = 1.0 / num_inference_steps
662
+
663
+ # 7. Denoising loop of Euler discretization from t = 0 to t = 1
664
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
665
+ for i, t in enumerate(timesteps):
666
+ # expand the latents if we are doing classifier free guidance
667
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
668
+
669
+ vec_t = torch.ones((latent_model_input.shape[0],), device=latents.device) * t
670
+
671
+ v_pred = self.unet(latent_model_input, vec_t, encoder_hidden_states=prompt_embeds).sample
672
+
673
+ # perform guidance
674
+ if do_classifier_free_guidance:
675
+ v_pred_neg, v_pred_text = v_pred.chunk(2)
676
+ v_pred = v_pred_neg + guidance_scale * (v_pred_text - v_pred_neg)
677
+
678
+ latents = latents + dt * v_pred
679
+
680
+ # call the callback, if provided
681
+ if i == len(timesteps) - 1 or ((i + 1) % self.scheduler.order == 0):
682
+ progress_bar.update()
683
+ if callback is not None and i % callback_steps == 0:
684
+ step_idx = i // getattr(self.scheduler, "order", 1)
685
+ callback(step_idx, t, latents)
686
+
687
+ if not output_type == "latent":
688
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
689
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
690
+ else:
691
+ image = latents
692
+ has_nsfw_concept = None
693
+
694
+ if has_nsfw_concept is None:
695
+ do_denormalize = [True] * image.shape[0]
696
+ else:
697
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
698
+
699
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
700
+
701
+ # Offload all models
702
+ self.maybe_free_model_hooks()
703
+
704
+ if not return_dict:
705
+ return (image, has_nsfw_concept)
706
+
707
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
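A minimal, hypothetical usage sketch for the InstaFlow one-step pipeline above (not part of this commit). The checkpoint id is an assumption; the call mirrors the `__call__` signature in this file, taking a single Euler step (`dt = 1.0`) over the rectified-flow velocity prediction.

```python
# Minimal sketch: one-step text-to-image with the InstaFlowPipeline shown above.
# The checkpoint id is a hypothetical placeholder, not confirmed by this diff.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "XCLiu/instaflow_0_9B_from_sd_1_5",    # assumed InstaFlow weights
    custom_pipeline="instaflow_one_step",  # assumed to match this community pipeline file
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    "a photo of a corgi wearing a party hat",
    num_inference_steps=1,   # a single Euler step: t starts at 1000, dt = 1.0
    guidance_scale=0.0,      # distilled one-step models are typically run without CFG
).images[0]
image.save("instaflow.png")
```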
v0.26.3/interpolate_stable_diffusion.py ADDED
@@ -0,0 +1,525 @@
1
+ import inspect
2
+ import time
3
+ from pathlib import Path
4
+ from typing import Callable, List, Optional, Union
5
+
6
+ import numpy as np
7
+ import torch
8
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
9
+
10
+ from diffusers import DiffusionPipeline
11
+ from diffusers.configuration_utils import FrozenDict
12
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
15
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
16
+ from diffusers.utils import deprecate, logging
17
+
18
+
19
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
20
+
21
+
22
+ def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
23
+ """helper function to spherically interpolate two arrays v1 v2"""
24
+
25
+ if not isinstance(v0, np.ndarray):
26
+ inputs_are_torch = True
27
+ input_device = v0.device
28
+ v0 = v0.cpu().numpy()
29
+ v1 = v1.cpu().numpy()
30
+
31
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
32
+ if np.abs(dot) > DOT_THRESHOLD:
33
+ v2 = (1 - t) * v0 + t * v1
34
+ else:
35
+ theta_0 = np.arccos(dot)
36
+ sin_theta_0 = np.sin(theta_0)
37
+ theta_t = theta_0 * t
38
+ sin_theta_t = np.sin(theta_t)
39
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
40
+ s1 = sin_theta_t / sin_theta_0
41
+ v2 = s0 * v0 + s1 * v1
42
+
43
+ if inputs_are_torch:
44
+ v2 = torch.from_numpy(v2).to(input_device)
45
+
46
+ return v2
47
+
48
+
49
+ class StableDiffusionWalkPipeline(DiffusionPipeline):
50
+ r"""
51
+ Pipeline for text-to-image generation using Stable Diffusion.
52
+
53
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
54
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
55
+
56
+ Args:
57
+ vae ([`AutoencoderKL`]):
58
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
59
+ text_encoder ([`CLIPTextModel`]):
60
+ Frozen text-encoder. Stable Diffusion uses the text portion of
61
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
62
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
63
+ tokenizer (`CLIPTokenizer`):
64
+ Tokenizer of class
65
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
66
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
67
+ scheduler ([`SchedulerMixin`]):
68
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
69
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
70
+ safety_checker ([`StableDiffusionSafetyChecker`]):
71
+ Classification module that estimates whether generated images could be considered offensive or harmful.
72
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
73
+ feature_extractor ([`CLIPImageProcessor`]):
74
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
75
+ """
76
+
77
+ def __init__(
78
+ self,
79
+ vae: AutoencoderKL,
80
+ text_encoder: CLIPTextModel,
81
+ tokenizer: CLIPTokenizer,
82
+ unet: UNet2DConditionModel,
83
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
84
+ safety_checker: StableDiffusionSafetyChecker,
85
+ feature_extractor: CLIPImageProcessor,
86
+ ):
87
+ super().__init__()
88
+
89
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
90
+ deprecation_message = (
91
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
92
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
93
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
94
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
95
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
96
+ " file"
97
+ )
98
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
99
+ new_config = dict(scheduler.config)
100
+ new_config["steps_offset"] = 1
101
+ scheduler._internal_dict = FrozenDict(new_config)
102
+
103
+ if safety_checker is None:
104
+ logger.warning(
105
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
106
+ " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
107
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
108
+ " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
109
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
110
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
111
+ )
112
+
113
+ self.register_modules(
114
+ vae=vae,
115
+ text_encoder=text_encoder,
116
+ tokenizer=tokenizer,
117
+ unet=unet,
118
+ scheduler=scheduler,
119
+ safety_checker=safety_checker,
120
+ feature_extractor=feature_extractor,
121
+ )
122
+
123
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
124
+ r"""
125
+ Enable sliced attention computation.
126
+
127
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
128
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
129
+
130
+ Args:
131
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
132
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
133
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
134
+ `attention_head_dim` must be a multiple of `slice_size`.
135
+ """
136
+ if slice_size == "auto":
137
+ # half the attention head size is usually a good trade-off between
138
+ # speed and memory
139
+ slice_size = self.unet.config.attention_head_dim // 2
140
+ self.unet.set_attention_slice(slice_size)
141
+
142
+ def disable_attention_slicing(self):
143
+ r"""
144
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
145
+ back to computing attention in one step.
146
+ """
147
+ # set slice_size = `None` to disable `attention slicing`
148
+ self.enable_attention_slicing(None)
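+
+ # Usage sketch (hypothetical; `pipe` stands for a loaded instance of this pipeline,
+ # not a name defined in this file). Slicing can be toggled around a generation call:
+ # pipe.enable_attention_slicing("auto")  # attention computed in two steps per head
+ # image = pipe("a photo of an astronaut").images[0]
+ # pipe.disable_attention_slicing()       # back to single-step attention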
149
+
150
+ @torch.no_grad()
151
+ def __call__(
152
+ self,
153
+ prompt: Optional[Union[str, List[str]]] = None,
154
+ height: int = 512,
155
+ width: int = 512,
156
+ num_inference_steps: int = 50,
157
+ guidance_scale: float = 7.5,
158
+ negative_prompt: Optional[Union[str, List[str]]] = None,
159
+ num_images_per_prompt: Optional[int] = 1,
160
+ eta: float = 0.0,
161
+ generator: Optional[torch.Generator] = None,
162
+ latents: Optional[torch.FloatTensor] = None,
163
+ output_type: Optional[str] = "pil",
164
+ return_dict: bool = True,
165
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
166
+ callback_steps: int = 1,
167
+ text_embeddings: Optional[torch.FloatTensor] = None,
168
+ **kwargs,
169
+ ):
170
+ r"""
171
+ Function invoked when calling the pipeline for generation.
172
+
173
+ Args:
174
+ prompt (`str` or `List[str]`, *optional*, defaults to `None`):
175
+ The prompt or prompts to guide the image generation. If not provided, `text_embeddings` is required.
176
+ height (`int`, *optional*, defaults to 512):
177
+ The height in pixels of the generated image.
178
+ width (`int`, *optional*, defaults to 512):
179
+ The width in pixels of the generated image.
180
+ num_inference_steps (`int`, *optional*, defaults to 50):
181
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
182
+ expense of slower inference.
183
+ guidance_scale (`float`, *optional*, defaults to 7.5):
184
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
185
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
186
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
187
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
188
+ usually at the expense of lower image quality.
189
+ negative_prompt (`str` or `List[str]`, *optional*):
190
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
191
+ if `guidance_scale` is less than `1`).
192
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
193
+ The number of images to generate per prompt.
194
+ eta (`float`, *optional*, defaults to 0.0):
195
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
196
+ [`schedulers.DDIMScheduler`], will be ignored for others.
197
+ generator (`torch.Generator`, *optional*):
198
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
199
+ deterministic.
200
+ latents (`torch.FloatTensor`, *optional*):
201
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
202
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
203
+ tensor will be generated by sampling using the supplied random `generator`.
204
+ output_type (`str`, *optional*, defaults to `"pil"`):
205
+ The output format of the generated image. Choose between
206
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
207
+ return_dict (`bool`, *optional*, defaults to `True`):
208
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
209
+ plain tuple.
210
+ callback (`Callable`, *optional*):
211
+ A function that will be called every `callback_steps` steps during inference. The function will be
212
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
213
+ callback_steps (`int`, *optional*, defaults to 1):
214
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
215
+ called at every step.
216
+ text_embeddings (`torch.FloatTensor`, *optional*, defaults to `None`):
217
+ Pre-generated text embeddings to be used as inputs for image generation. Can be used in place of
218
+ `prompt` to avoid re-computing the embeddings. If not provided, the embeddings will be generated from
219
+ the supplied `prompt`.
220
+
221
+ Returns:
222
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
223
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
224
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
225
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
226
+ (nsfw) content, according to the `safety_checker`.
227
+ """
228
+
229
+ if height % 8 != 0 or width % 8 != 0:
230
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
231
+
232
+ if (callback_steps is None) or (
233
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
234
+ ):
235
+ raise ValueError(
236
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
237
+ f" {type(callback_steps)}."
238
+ )
239
+
240
+ if text_embeddings is None:
241
+ if isinstance(prompt, str):
242
+ batch_size = 1
243
+ elif isinstance(prompt, list):
244
+ batch_size = len(prompt)
245
+ else:
246
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
247
+
248
+ # get prompt text embeddings
249
+ text_inputs = self.tokenizer(
250
+ prompt,
251
+ padding="max_length",
252
+ max_length=self.tokenizer.model_max_length,
253
+ return_tensors="pt",
254
+ )
255
+ text_input_ids = text_inputs.input_ids
256
+
257
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
258
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
259
+ print(
260
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
261
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
262
+ )
263
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
264
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
265
+ else:
266
+ batch_size = text_embeddings.shape[0]
267
+
268
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
269
+ bs_embed, seq_len, _ = text_embeddings.shape
270
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
271
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
272
+
273
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
274
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
275
+ # corresponds to doing no classifier free guidance.
276
+ do_classifier_free_guidance = guidance_scale > 1.0
277
+ # get unconditional embeddings for classifier free guidance
278
+ if do_classifier_free_guidance:
279
+ uncond_tokens: List[str]
280
+ if negative_prompt is None:
281
+ uncond_tokens = [""] * batch_size
282
+ elif type(prompt) is not type(negative_prompt):
283
+ raise TypeError(
284
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
285
+ f" {type(prompt)}."
286
+ )
287
+ elif isinstance(negative_prompt, str):
288
+ uncond_tokens = [negative_prompt]
289
+ elif batch_size != len(negative_prompt):
290
+ raise ValueError(
291
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
292
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
293
+ " the batch size of `prompt`."
294
+ )
295
+ else:
296
+ uncond_tokens = negative_prompt
297
+
298
+ max_length = self.tokenizer.model_max_length
299
+ uncond_input = self.tokenizer(
300
+ uncond_tokens,
301
+ padding="max_length",
302
+ max_length=max_length,
303
+ truncation=True,
304
+ return_tensors="pt",
305
+ )
306
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
307
+
308
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
309
+ seq_len = uncond_embeddings.shape[1]
310
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
311
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
312
+
313
+ # For classifier free guidance, we need to do two forward passes.
314
+ # Here we concatenate the unconditional and text embeddings into a single batch
315
+ # to avoid doing two forward passes
316
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
317
+
318
+ # get the initial random noise unless the user supplied it
319
+
320
+ # Unlike in other pipelines, latents need to be generated in the target device
321
+ # for 1-to-1 results reproducibility with the CompVis implementation.
322
+ # However this currently doesn't work in `mps`.
323
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
324
+ latents_dtype = text_embeddings.dtype
325
+ if latents is None:
326
+ if self.device.type == "mps":
327
+ # randn does not work reproducibly on mps
328
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
329
+ self.device
330
+ )
331
+ else:
332
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
333
+ else:
334
+ if latents.shape != latents_shape:
335
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
336
+ latents = latents.to(self.device)
337
+
338
+ # set timesteps
339
+ self.scheduler.set_timesteps(num_inference_steps)
340
+
341
+ # Some schedulers like PNDM have timesteps as arrays
342
+ # It's more optimized to move all timesteps to correct device beforehand
343
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
344
+
345
+ # scale the initial noise by the standard deviation required by the scheduler
346
+ latents = latents * self.scheduler.init_noise_sigma
347
+
348
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
349
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
350
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
351
+ # and should be between [0, 1]
352
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
353
+ extra_step_kwargs = {}
354
+ if accepts_eta:
355
+ extra_step_kwargs["eta"] = eta
356
+
357
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
358
+ # expand the latents if we are doing classifier free guidance
359
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
360
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
361
+
362
+ # predict the noise residual
363
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
364
+
365
+ # perform guidance
366
+ if do_classifier_free_guidance:
367
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
368
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
369
+
370
+ # compute the previous noisy sample x_t -> x_t-1
371
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
372
+
373
+ # call the callback, if provided
374
+ if callback is not None and i % callback_steps == 0:
375
+ step_idx = i // getattr(self.scheduler, "order", 1)
376
+ callback(step_idx, t, latents)
377
+
378
+ latents = 1 / 0.18215 * latents
379
+ image = self.vae.decode(latents).sample
380
+
381
+ image = (image / 2 + 0.5).clamp(0, 1)
382
+
383
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
384
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
385
+
386
+ if self.safety_checker is not None:
387
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
388
+ self.device
389
+ )
390
+ image, has_nsfw_concept = self.safety_checker(
391
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
392
+ )
393
+ else:
394
+ has_nsfw_concept = None
395
+
396
+ if output_type == "pil":
397
+ image = self.numpy_to_pil(image)
398
+
399
+ if not return_dict:
400
+ return (image, has_nsfw_concept)
401
+
402
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
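+
+ # Minimal call sketch (hypothetical; assumes this pipeline is instantiated as `pipe`).
+ # Either a `prompt` or pre-computed `text_embeddings` must be supplied:
+ # output = pipe(prompt="a fantasy landscape", num_inference_steps=50, guidance_scale=7.5)
+ # image = output.images[0]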
403
+
404
+ def embed_text(self, text):
405
+ """takes in text and turns it into text embeddings"""
406
+ text_input = self.tokenizer(
407
+ text,
408
+ padding="max_length",
409
+ max_length=self.tokenizer.model_max_length,
410
+ truncation=True,
411
+ return_tensors="pt",
412
+ )
413
+ with torch.no_grad():
414
+ embed = self.text_encoder(text_input.input_ids.to(self.device))[0]
415
+ return embed
416
+
417
+ def get_noise(self, seed, dtype=torch.float32, height=512, width=512):
418
+ """Takes in random seed and returns corresponding noise vector"""
419
+ return torch.randn(
420
+ (1, self.unet.config.in_channels, height // 8, width // 8),
421
+ generator=torch.Generator(device=self.device).manual_seed(seed),
422
+ device=self.device,
423
+ dtype=dtype,
424
+ )
425
+
426
+ def walk(
427
+ self,
428
+ prompts: List[str],
429
+ seeds: List[int],
430
+ num_interpolation_steps: Optional[int] = 6,
431
+ output_dir: Optional[str] = "./dreams",
432
+ name: Optional[str] = None,
433
+ batch_size: Optional[int] = 1,
434
+ height: Optional[int] = 512,
435
+ width: Optional[int] = 512,
436
+ guidance_scale: Optional[float] = 7.5,
437
+ num_inference_steps: Optional[int] = 50,
438
+ eta: Optional[float] = 0.0,
439
+ ) -> List[str]:
440
+ """
441
+ Walks through a series of prompts and seeds, interpolating between them and saving the results to disk.
442
+
443
+ Args:
444
+ prompts (`List[str]`):
445
+ List of prompts to generate images for.
446
+ seeds (`List[int]`):
447
+ List of seeds corresponding to provided prompts. Must be the same length as prompts.
448
+ num_interpolation_steps (`int`, *optional*, defaults to 6):
449
+ Number of interpolation steps to take between prompts.
450
+ output_dir (`str`, *optional*, defaults to `./dreams`):
451
+ Directory to save the generated images to.
452
+ name (`str`, *optional*, defaults to `None`):
453
+ Subdirectory of `output_dir` to save the generated images to. If `None`, the name will
454
+ be the current time.
455
+ batch_size (`int`, *optional*, defaults to 1):
456
+ Number of images to generate at once.
457
+ height (`int`, *optional*, defaults to 512):
458
+ Height of the generated images.
459
+ width (`int`, *optional*, defaults to 512):
460
+ Width of the generated images.
461
+ guidance_scale (`float`, *optional*, defaults to 7.5):
462
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
463
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
464
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
465
+ 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
466
+ usually at the expense of lower image quality.
467
+ num_inference_steps (`int`, *optional*, defaults to 50):
468
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
469
+ expense of slower inference.
470
+ eta (`float`, *optional*, defaults to 0.0):
471
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
472
+ [`schedulers.DDIMScheduler`], will be ignored for others.
473
+
474
+ Returns:
475
+ `List[str]`: List of paths to the generated images.
476
+ """
477
+ if not len(prompts) == len(seeds):
478
+ raise ValueError(
479
+ f"Number of prompts and seeds must be equal. Got {len(prompts)} prompts and {len(seeds)} seeds."
480
+ )
481
+
482
+ name = name or time.strftime("%Y%m%d-%H%M%S")
483
+ save_path = Path(output_dir) / name
484
+ save_path.mkdir(exist_ok=True, parents=True)
485
+
486
+ frame_idx = 0
487
+ frame_filepaths = []
488
+ for prompt_a, prompt_b, seed_a, seed_b in zip(prompts, prompts[1:], seeds, seeds[1:]):
489
+ # Embed Text
490
+ embed_a = self.embed_text(prompt_a)
491
+ embed_b = self.embed_text(prompt_b)
492
+
493
+ # Get Noise
494
+ noise_dtype = embed_a.dtype
495
+ noise_a = self.get_noise(seed_a, noise_dtype, height, width)
496
+ noise_b = self.get_noise(seed_b, noise_dtype, height, width)
497
+
498
+ noise_batch, embeds_batch = None, None
499
+ T = np.linspace(0.0, 1.0, num_interpolation_steps)
500
+ for i, t in enumerate(T):
501
+ noise = slerp(float(t), noise_a, noise_b)
502
+ embed = torch.lerp(embed_a, embed_b, t)
503
+
504
+ noise_batch = noise if noise_batch is None else torch.cat([noise_batch, noise], dim=0)
505
+ embeds_batch = embed if embeds_batch is None else torch.cat([embeds_batch, embed], dim=0)
506
+
507
+ batch_is_ready = embeds_batch.shape[0] == batch_size or i + 1 == T.shape[0]
508
+ if batch_is_ready:
509
+ outputs = self(
510
+ latents=noise_batch,
511
+ text_embeddings=embeds_batch,
512
+ height=height,
513
+ width=width,
514
+ guidance_scale=guidance_scale,
515
+ eta=eta,
516
+ num_inference_steps=num_inference_steps,
517
+ )
518
+ noise_batch, embeds_batch = None, None
519
+
520
+ for image in outputs["images"]:
521
+ frame_filepath = str(save_path / f"frame_{frame_idx:06d}.png")
522
+ image.save(frame_filepath)
523
+ frame_filepaths.append(frame_filepath)
524
+ frame_idx += 1
525
+ return frame_filepaths
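+
+ # Interpolation sketch (hypothetical usage with an instance `pipe` of this pipeline):
+ # `walk` pairs consecutive prompts/seeds, slerps the latent noise, lerps the text
+ # embeddings, and saves every frame under `output_dir/name`.
+ # frames = pipe.walk(
+ #     prompts=["a forest at dawn", "a forest at night"],
+ #     seeds=[42, 1337],
+ #     num_interpolation_steps=6,
+ # )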
v0.26.3/ip_adapter_face_id.py ADDED
@@ -0,0 +1,1525 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+ from packaging import version
22
+ from safetensors import safe_open
23
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
24
+
25
+ from diffusers.configuration_utils import FrozenDict
26
+ from diffusers.image_processor import VaeImageProcessor
27
+ from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
28
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
29
+ from diffusers.models.attention_processor import FusedAttnProcessor2_0
30
+ from diffusers.models.lora import LoRALinearLayer, adjust_lora_scale_text_encoder
31
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
32
+ from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
33
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
34
+ from diffusers.schedulers import KarrasDiffusionSchedulers
35
+ from diffusers.utils import (
36
+ USE_PEFT_BACKEND,
37
+ _get_model_file,
38
+ deprecate,
39
+ logging,
40
+ scale_lora_layers,
41
+ unscale_lora_layers,
42
+ )
43
+ from diffusers.utils.torch_utils import randn_tensor
44
+
45
+
46
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
47
+
48
+
49
+ class LoRAIPAdapterAttnProcessor(nn.Module):
50
+ r"""
51
+ Attention processor for IP-Adapter.
52
+ Args:
53
+ hidden_size (`int`):
54
+ The hidden size of the attention layer.
55
+ cross_attention_dim (`int`):
56
+ The number of channels in the `encoder_hidden_states`.
57
+ rank (`int`, defaults to 4):
58
+ The dimension of the LoRA update matrices.
59
+ network_alpha (`int`, *optional*):
60
+ Equivalent to `alpha` but its usage is specific to Kohya (A1111) style LoRAs.
61
+ lora_scale (`float`, defaults to 1.0):
62
+ the weight scale of LoRA.
63
+ scale (`float`, defaults to 1.0):
64
+ the weight scale of image prompt.
65
+ num_tokens (`int`, defaults to 4; when using ip_adapter_plus it should be 16):
66
+ The context length of the image features.
67
+ """
68
+
69
+ def __init__(
70
+ self,
71
+ hidden_size,
72
+ cross_attention_dim=None,
73
+ rank=4,
74
+ network_alpha=None,
75
+ lora_scale=1.0,
76
+ scale=1.0,
77
+ num_tokens=4,
78
+ ):
79
+ super().__init__()
80
+
81
+ self.rank = rank
82
+ self.lora_scale = lora_scale
83
+
84
+ self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
85
+ self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
86
+ self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
87
+ self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
88
+
89
+ self.hidden_size = hidden_size
90
+ self.cross_attention_dim = cross_attention_dim
91
+ self.scale = scale
92
+ self.num_tokens = num_tokens
93
+
94
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
95
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
96
+
97
+ def __call__(
98
+ self,
99
+ attn,
100
+ hidden_states,
101
+ encoder_hidden_states=None,
102
+ attention_mask=None,
103
+ temb=None,
104
+ ):
105
+ residual = hidden_states
106
+
107
+ if attn.spatial_norm is not None:
108
+ hidden_states = attn.spatial_norm(hidden_states, temb)
109
+
110
+ input_ndim = hidden_states.ndim
111
+
112
+ if input_ndim == 4:
113
+ batch_size, channel, height, width = hidden_states.shape
114
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
115
+
116
+ batch_size, sequence_length, _ = (
117
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
118
+ )
119
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
120
+
121
+ if attn.group_norm is not None:
122
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
123
+
124
+ query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
125
+
126
+ if encoder_hidden_states is None:
127
+ encoder_hidden_states = hidden_states
128
+ else:
129
+ # get encoder_hidden_states, ip_hidden_states
130
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
131
+ encoder_hidden_states, ip_hidden_states = (
132
+ encoder_hidden_states[:, :end_pos, :],
133
+ encoder_hidden_states[:, end_pos:, :],
134
+ )
135
+ if attn.norm_cross:
136
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
137
+
138
+ key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
139
+ value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
140
+
141
+ query = attn.head_to_batch_dim(query)
142
+ key = attn.head_to_batch_dim(key)
143
+ value = attn.head_to_batch_dim(value)
144
+
145
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
146
+ hidden_states = torch.bmm(attention_probs, value)
147
+ hidden_states = attn.batch_to_head_dim(hidden_states)
148
+
149
+ # for ip-adapter
150
+ ip_key = self.to_k_ip(ip_hidden_states)
151
+ ip_value = self.to_v_ip(ip_hidden_states)
152
+
153
+ ip_key = attn.head_to_batch_dim(ip_key)
154
+ ip_value = attn.head_to_batch_dim(ip_value)
155
+
156
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
157
+ ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
158
+ ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
159
+
160
+ hidden_states = hidden_states + self.scale * ip_hidden_states
161
+
162
+ # linear proj
163
+ hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
164
+ # dropout
165
+ hidden_states = attn.to_out[1](hidden_states)
166
+
167
+ if input_ndim == 4:
168
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
169
+
170
+ if attn.residual_connection:
171
+ hidden_states = hidden_states + residual
172
+
173
+ hidden_states = hidden_states / attn.rescale_output_factor
174
+
175
+ return hidden_states
176
+
177
+
178
+ class LoRAIPAdapterAttnProcessor2_0(nn.Module):
179
+ r"""
180
+ Attention processor for IP-Adapter for PyTorch 2.0.
181
+ Args:
182
+ hidden_size (`int`):
183
+ The hidden size of the attention layer.
184
+ cross_attention_dim (`int`):
185
+ The number of channels in the `encoder_hidden_states`.
186
+ rank (`int`, defaults to 4):
187
+ The dimension of the LoRA update matrices.
188
+ network_alpha (`int`, *optional*):
189
+ Equivalent to `alpha` but it's usage is specific to Kohya (A1111) style LoRAs.
190
+ lora_scale (`float`, defaults to 1.0):
191
+ the weight scale of LoRA.
192
+ scale (`float`, defaults to 1.0):
193
+ the weight scale of image prompt.
194
+ num_tokens (`int`, defaults to 4; when using ip_adapter_plus it should be 16):
195
+ The context length of the image features.
196
+ """
197
+
198
+ def __init__(
199
+ self,
200
+ hidden_size,
201
+ cross_attention_dim=None,
202
+ rank=4,
203
+ network_alpha=None,
204
+ lora_scale=1.0,
205
+ scale=1.0,
206
+ num_tokens=4,
207
+ ):
208
+ super().__init__()
209
+
210
+ self.rank = rank
211
+ self.lora_scale = lora_scale
212
+
213
+ self.to_q_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
214
+ self.to_k_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
215
+ self.to_v_lora = LoRALinearLayer(cross_attention_dim or hidden_size, hidden_size, rank, network_alpha)
216
+ self.to_out_lora = LoRALinearLayer(hidden_size, hidden_size, rank, network_alpha)
217
+
218
+ self.hidden_size = hidden_size
219
+ self.cross_attention_dim = cross_attention_dim
220
+ self.scale = scale
221
+ self.num_tokens = num_tokens
222
+
223
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
224
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
225
+
226
+ def __call__(
227
+ self,
228
+ attn,
229
+ hidden_states,
230
+ encoder_hidden_states=None,
231
+ attention_mask=None,
232
+ temb=None,
233
+ ):
234
+ residual = hidden_states
235
+
236
+ if attn.spatial_norm is not None:
237
+ hidden_states = attn.spatial_norm(hidden_states, temb)
238
+
239
+ input_ndim = hidden_states.ndim
240
+
241
+ if input_ndim == 4:
242
+ batch_size, channel, height, width = hidden_states.shape
243
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
244
+
245
+ batch_size, sequence_length, _ = (
246
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
247
+ )
248
+
249
+ if attention_mask is not None:
250
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
251
+ # scaled_dot_product_attention expects attention_mask shape to be
252
+ # (batch, heads, source_length, target_length)
253
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
254
+
255
+ if attn.group_norm is not None:
256
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
257
+
258
+ query = attn.to_q(hidden_states) + self.lora_scale * self.to_q_lora(hidden_states)
259
+
260
+ if encoder_hidden_states is None:
261
+ encoder_hidden_states = hidden_states
262
+ else:
263
+ # get encoder_hidden_states, ip_hidden_states
264
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
265
+ encoder_hidden_states, ip_hidden_states = (
266
+ encoder_hidden_states[:, :end_pos, :],
267
+ encoder_hidden_states[:, end_pos:, :],
268
+ )
269
+ if attn.norm_cross:
270
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
271
+
272
+ key = attn.to_k(encoder_hidden_states) + self.lora_scale * self.to_k_lora(encoder_hidden_states)
273
+ value = attn.to_v(encoder_hidden_states) + self.lora_scale * self.to_v_lora(encoder_hidden_states)
274
+
275
+ inner_dim = key.shape[-1]
276
+ head_dim = inner_dim // attn.heads
277
+
278
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
279
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
280
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
281
+
282
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
283
+ # TODO: add support for attn.scale when we move to Torch 2.1
284
+ hidden_states = F.scaled_dot_product_attention(
285
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
286
+ )
287
+
288
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
289
+ hidden_states = hidden_states.to(query.dtype)
290
+
291
+ # for ip-adapter
292
+ ip_key = self.to_k_ip(ip_hidden_states)
293
+ ip_value = self.to_v_ip(ip_hidden_states)
294
+
295
+ ip_key = ip_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
296
+ ip_value = ip_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
297
+
298
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
299
+ # TODO: add support for attn.scale when we move to Torch 2.1
300
+ ip_hidden_states = F.scaled_dot_product_attention(
301
+ query, ip_key, ip_value, attn_mask=None, dropout_p=0.0, is_causal=False
302
+ )
303
+
304
+ ip_hidden_states = ip_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
305
+ ip_hidden_states = ip_hidden_states.to(query.dtype)
306
+
307
+ hidden_states = hidden_states + self.scale * ip_hidden_states
308
+
309
+ # linear proj
310
+ hidden_states = attn.to_out[0](hidden_states) + self.lora_scale * self.to_out_lora(hidden_states)
311
+ # dropout
312
+ hidden_states = attn.to_out[1](hidden_states)
313
+
314
+ if input_ndim == 4:
315
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
316
+
317
+ if attn.residual_connection:
318
+ hidden_states = hidden_states + residual
319
+
320
+ hidden_states = hidden_states / attn.rescale_output_factor
321
+
322
+ return hidden_states
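+
+ # Construction sketch (hypothetical values): both processors treat the last `num_tokens`
+ # entries of `encoder_hidden_states` as image-prompt tokens, attend to them separately,
+ # and add the result scaled by `scale` on top of the LoRA-augmented text attention.
+ # proc = LoRAIPAdapterAttnProcessor2_0(
+ #     hidden_size=320, cross_attention_dim=768, rank=128, scale=1.0, num_tokens=4
+ # )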
323
+
324
+
325
+ class IPAdapterFullImageProjection(nn.Module):
326
+ def __init__(self, image_embed_dim=1024, cross_attention_dim=1024, mult=1, num_tokens=1):
327
+ super().__init__()
328
+ from diffusers.models.attention import FeedForward
329
+
330
+ self.num_tokens = num_tokens
331
+ self.cross_attention_dim = cross_attention_dim
332
+ self.ff = FeedForward(image_embed_dim, cross_attention_dim * num_tokens, mult=mult, activation_fn="gelu")
333
+ self.norm = nn.LayerNorm(cross_attention_dim)
334
+
335
+ def forward(self, image_embeds: torch.FloatTensor):
336
+ x = self.ff(image_embeds)
337
+ x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
338
+ return self.norm(x)
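+
+ # Shape note (dims are illustrative): a (batch, image_embed_dim) face embedding is expanded
+ # by the feed-forward to (batch, num_tokens * cross_attention_dim), reshaped to
+ # (batch, num_tokens, cross_attention_dim), and layer-normalized.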
339
+
340
+
341
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
342
+ """
343
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
344
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
345
+ """
346
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
347
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
348
+ # rescale the results from guidance (fixes overexposure)
349
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
350
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
351
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
352
+ return noise_cfg
353
+
354
+
355
+ def retrieve_timesteps(
356
+ scheduler,
357
+ num_inference_steps: Optional[int] = None,
358
+ device: Optional[Union[str, torch.device]] = None,
359
+ timesteps: Optional[List[int]] = None,
360
+ **kwargs,
361
+ ):
362
+ """
363
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
364
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
365
+
366
+ Args:
367
+ scheduler (`SchedulerMixin`):
368
+ The scheduler to get timesteps from.
369
+ num_inference_steps (`int`):
370
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
371
+ `timesteps` must be `None`.
372
+ device (`str` or `torch.device`, *optional*):
373
+ The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
374
+ timesteps (`List[int]`, *optional*):
375
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
376
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
377
+ must be `None`.
378
+
379
+ Returns:
380
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
381
+ second element is the number of inference steps.
382
+ """
383
+ if timesteps is not None:
384
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
385
+ if not accepts_timesteps:
386
+ raise ValueError(
387
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
388
+ f" timestep schedules. Please check whether you are using the correct scheduler."
389
+ )
390
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
391
+ timesteps = scheduler.timesteps
392
+ num_inference_steps = len(timesteps)
393
+ else:
394
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
395
+ timesteps = scheduler.timesteps
396
+ return timesteps, num_inference_steps
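+
+ # Usage sketch (assumes an already configured `scheduler` instance): pass exactly one of
+ # `num_inference_steps` or `timesteps`.
+ # timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=30, device="cuda")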
397
+
398
+
399
+ class IPAdapterFaceIDStableDiffusionPipeline(
400
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
401
+ ):
402
+ r"""
403
+ Pipeline for text-to-image generation using Stable Diffusion.
404
+
405
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
406
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
407
+
408
+ The pipeline also inherits the following loading methods:
409
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
410
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
411
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
412
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
413
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
414
+
415
+ Args:
416
+ vae ([`AutoencoderKL`]):
417
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
418
+ text_encoder ([`~transformers.CLIPTextModel`]):
419
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
420
+ tokenizer ([`~transformers.CLIPTokenizer`]):
421
+ A `CLIPTokenizer` to tokenize text.
422
+ unet ([`UNet2DConditionModel`]):
423
+ A `UNet2DConditionModel` to denoise the encoded image latents.
424
+ scheduler ([`SchedulerMixin`]):
425
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
426
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
427
+ safety_checker ([`StableDiffusionSafetyChecker`]):
428
+ Classification module that estimates whether generated images could be considered offensive or harmful.
429
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
430
+ about a model's potential harms.
431
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
432
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
433
+ """
434
+
435
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
436
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
437
+ _exclude_from_cpu_offload = ["safety_checker"]
438
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
439
+
440
+ def __init__(
441
+ self,
442
+ vae: AutoencoderKL,
443
+ text_encoder: CLIPTextModel,
444
+ tokenizer: CLIPTokenizer,
445
+ unet: UNet2DConditionModel,
446
+ scheduler: KarrasDiffusionSchedulers,
447
+ safety_checker: StableDiffusionSafetyChecker,
448
+ feature_extractor: CLIPImageProcessor,
449
+ image_encoder: CLIPVisionModelWithProjection = None,
450
+ requires_safety_checker: bool = True,
451
+ ):
452
+ super().__init__()
453
+
454
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
455
+ deprecation_message = (
456
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
457
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
458
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
459
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
460
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
461
+ " file"
462
+ )
463
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
464
+ new_config = dict(scheduler.config)
465
+ new_config["steps_offset"] = 1
466
+ scheduler._internal_dict = FrozenDict(new_config)
467
+
468
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
469
+ deprecation_message = (
470
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
471
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
472
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
473
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
474
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
475
+ )
476
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
477
+ new_config = dict(scheduler.config)
478
+ new_config["clip_sample"] = False
479
+ scheduler._internal_dict = FrozenDict(new_config)
480
+
481
+ if safety_checker is None and requires_safety_checker:
482
+ logger.warning(
483
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
484
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
485
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
486
+ " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
487
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
488
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
489
+ )
490
+
491
+ if safety_checker is not None and feature_extractor is None:
492
+ raise ValueError(
493
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
494
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
495
+ )
496
+
497
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
498
+ version.parse(unet.config._diffusers_version).base_version
499
+ ) < version.parse("0.9.0.dev0")
500
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
501
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
502
+ deprecation_message = (
503
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
504
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
505
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
506
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
507
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
508
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
509
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
510
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
511
+ " the `unet/config.json` file"
512
+ )
513
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
514
+ new_config = dict(unet.config)
515
+ new_config["sample_size"] = 64
516
+ unet._internal_dict = FrozenDict(new_config)
517
+
518
+ self.register_modules(
519
+ vae=vae,
520
+ text_encoder=text_encoder,
521
+ tokenizer=tokenizer,
522
+ unet=unet,
523
+ scheduler=scheduler,
524
+ safety_checker=safety_checker,
525
+ feature_extractor=feature_extractor,
526
+ image_encoder=image_encoder,
527
+ )
528
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
529
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
530
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
531
+
532
+ def load_ip_adapter_face_id(self, pretrained_model_name_or_path_or_dict, weight_name, **kwargs):
533
+ cache_dir = kwargs.pop("cache_dir", None)
534
+ force_download = kwargs.pop("force_download", False)
535
+ resume_download = kwargs.pop("resume_download", False)
536
+ proxies = kwargs.pop("proxies", None)
537
+ local_files_only = kwargs.pop("local_files_only", None)
538
+ token = kwargs.pop("token", None)
539
+ revision = kwargs.pop("revision", None)
540
+ subfolder = kwargs.pop("subfolder", None)
541
+
542
+ user_agent = {
543
+ "file_type": "attn_procs_weights",
544
+ "framework": "pytorch",
545
+ }
546
+ model_file = _get_model_file(
547
+ pretrained_model_name_or_path_or_dict,
548
+ weights_name=weight_name,
549
+ cache_dir=cache_dir,
550
+ force_download=force_download,
551
+ resume_download=resume_download,
552
+ proxies=proxies,
553
+ local_files_only=local_files_only,
554
+ token=token,
555
+ revision=revision,
556
+ subfolder=subfolder,
557
+ user_agent=user_agent,
558
+ )
559
+ if weight_name.endswith(".safetensors"):
560
+ state_dict = {"image_proj": {}, "ip_adapter": {}}
561
+ with safe_open(model_file, framework="pt", device="cpu") as f:
562
+ for key in f.keys():
563
+ if key.startswith("image_proj."):
564
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
565
+ elif key.startswith("ip_adapter."):
566
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
567
+ else:
568
+ state_dict = torch.load(model_file, map_location="cpu")
569
+ self._load_ip_adapter_weights(state_dict)
570
+
571
+ def convert_ip_adapter_image_proj_to_diffusers(self, state_dict):
572
+ updated_state_dict = {}
573
+ clip_embeddings_dim_in = state_dict["proj.0.weight"].shape[1]
574
+ clip_embeddings_dim_out = state_dict["proj.0.weight"].shape[0]
575
+ multiplier = clip_embeddings_dim_out // clip_embeddings_dim_in
576
+ norm_layer = "norm.weight"
577
+ cross_attention_dim = state_dict[norm_layer].shape[0]
578
+ num_tokens = state_dict["proj.2.weight"].shape[0] // cross_attention_dim
579
+
580
+ image_projection = IPAdapterFullImageProjection(
581
+ cross_attention_dim=cross_attention_dim,
582
+ image_embed_dim=clip_embeddings_dim_in,
583
+ mult=multiplier,
584
+ num_tokens=num_tokens,
585
+ )
586
+
587
+ for key, value in state_dict.items():
588
+ diffusers_name = key.replace("proj.0", "ff.net.0.proj")
589
+ diffusers_name = diffusers_name.replace("proj.2", "ff.net.2")
590
+ updated_state_dict[diffusers_name] = value
591
+
592
+ image_projection.load_state_dict(updated_state_dict)
593
+ return image_projection
594
+
595
+ def _load_ip_adapter_weights(self, state_dict):
596
+ from diffusers.models.attention_processor import (
597
+ AttnProcessor,
598
+ AttnProcessor2_0,
599
+ )
600
+
601
+ num_image_text_embeds = 4
602
+
603
+ self.unet.encoder_hid_proj = None
604
+
605
+ # set ip-adapter cross-attention processors & load state_dict
606
+ attn_procs = {}
607
+ key_id = 0
608
+ for name in self.unet.attn_processors.keys():
609
+ cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
610
+ if name.startswith("mid_block"):
611
+ hidden_size = self.unet.config.block_out_channels[-1]
612
+ elif name.startswith("up_blocks"):
613
+ block_id = int(name[len("up_blocks.")])
614
+ hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
615
+ elif name.startswith("down_blocks"):
616
+ block_id = int(name[len("down_blocks.")])
617
+ hidden_size = self.unet.config.block_out_channels[block_id]
618
+ if cross_attention_dim is None or "motion_modules" in name:
619
+ attn_processor_class = (
620
+ AttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else AttnProcessor
621
+ )
622
+ attn_procs[name] = attn_processor_class()
623
+ rank = state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"].shape[0]
624
+ attn_module = self.unet
625
+ for n in name.split(".")[:-1]:
626
+ attn_module = getattr(attn_module, n)
627
+ # Set the `lora_layer` attribute of the attention-related matrices.
628
+ attn_module.to_q.set_lora_layer(
629
+ LoRALinearLayer(
630
+ in_features=attn_module.to_q.in_features,
631
+ out_features=attn_module.to_q.out_features,
632
+ rank=rank,
633
+ )
634
+ )
635
+ attn_module.to_k.set_lora_layer(
636
+ LoRALinearLayer(
637
+ in_features=attn_module.to_k.in_features,
638
+ out_features=attn_module.to_k.out_features,
639
+ rank=rank,
640
+ )
641
+ )
642
+ attn_module.to_v.set_lora_layer(
643
+ LoRALinearLayer(
644
+ in_features=attn_module.to_v.in_features,
645
+ out_features=attn_module.to_v.out_features,
646
+ rank=rank,
647
+ )
648
+ )
649
+ attn_module.to_out[0].set_lora_layer(
650
+ LoRALinearLayer(
651
+ in_features=attn_module.to_out[0].in_features,
652
+ out_features=attn_module.to_out[0].out_features,
653
+ rank=rank,
654
+ )
655
+ )
656
+
657
+ value_dict = {}
658
+ for k, module in attn_module.named_children():
659
+ index = "."
660
+ if not hasattr(module, "set_lora_layer"):
661
+ index = ".0."
662
+ module = module[0]
663
+ lora_layer = getattr(module, "lora_layer")
664
+ for lora_name, w in lora_layer.state_dict().items():
665
+ value_dict.update(
666
+ {
667
+ f"{k}{index}lora_layer.{lora_name}": state_dict["ip_adapter"][
668
+ f"{key_id}.{k}_lora.{lora_name}"
669
+ ]
670
+ }
671
+ )
672
+
673
+ attn_module.load_state_dict(value_dict, strict=False)
674
+ attn_module.to(dtype=self.dtype, device=self.device)
675
+ key_id += 1
676
+ else:
677
+ rank = state_dict["ip_adapter"][f"{key_id}.to_q_lora.down.weight"].shape[0]
678
+ attn_processor_class = (
679
+ LoRAIPAdapterAttnProcessor2_0
680
+ if hasattr(F, "scaled_dot_product_attention")
681
+ else LoRAIPAdapterAttnProcessor
682
+ )
683
+ attn_procs[name] = attn_processor_class(
684
+ hidden_size=hidden_size,
685
+ cross_attention_dim=cross_attention_dim,
686
+ scale=1.0,
687
+ rank=rank,
688
+ num_tokens=num_image_text_embeds,
689
+ ).to(dtype=self.dtype, device=self.device)
690
+
691
+ value_dict = {}
692
+ for k, w in attn_procs[name].state_dict().items():
693
+ value_dict.update({f"{k}": state_dict["ip_adapter"][f"{key_id}.{k}"]})
694
+
695
+ attn_procs[name].load_state_dict(value_dict)
696
+ key_id += 1
697
+
698
+ self.unet.set_attn_processor(attn_procs)
699
+
700
+ # convert IP-Adapter Image Projection layers to diffusers
701
+ image_projection = self.convert_ip_adapter_image_proj_to_diffusers(state_dict["image_proj"])
702
+
703
+ self.unet.encoder_hid_proj = image_projection.to(device=self.device, dtype=self.dtype)
704
+ self.unet.config.encoder_hid_dim_type = "ip_image_proj"
705
+
706
+ def set_ip_adapter_scale(self, scale):
707
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
708
+ for attn_processor in unet.attn_processors.values():
709
+ if isinstance(attn_processor, (LoRAIPAdapterAttnProcessor, LoRAIPAdapterAttnProcessor2_0)):
710
+ attn_processor.scale = scale
711
+
712
+ def enable_vae_slicing(self):
713
+ r"""
714
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
715
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
716
+ """
717
+ self.vae.enable_slicing()
718
+
719
+ def disable_vae_slicing(self):
720
+ r"""
721
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
722
+ computing decoding in one step.
723
+ """
724
+ self.vae.disable_slicing()
725
+
726
+ def enable_vae_tiling(self):
727
+ r"""
728
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
729
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
730
+ processing larger images.
731
+ """
732
+ self.vae.enable_tiling()
733
+
734
+ def disable_vae_tiling(self):
735
+ r"""
736
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
737
+ computing decoding in one step.
738
+ """
739
+ self.vae.disable_tiling()
740
+
741
+ def _encode_prompt(
742
+ self,
743
+ prompt,
744
+ device,
745
+ num_images_per_prompt,
746
+ do_classifier_free_guidance,
747
+ negative_prompt=None,
748
+ prompt_embeds: Optional[torch.FloatTensor] = None,
749
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
750
+ lora_scale: Optional[float] = None,
751
+ **kwargs,
752
+ ):
753
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
754
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
755
+
756
+ prompt_embeds_tuple = self.encode_prompt(
757
+ prompt=prompt,
758
+ device=device,
759
+ num_images_per_prompt=num_images_per_prompt,
760
+ do_classifier_free_guidance=do_classifier_free_guidance,
761
+ negative_prompt=negative_prompt,
762
+ prompt_embeds=prompt_embeds,
763
+ negative_prompt_embeds=negative_prompt_embeds,
764
+ lora_scale=lora_scale,
765
+ **kwargs,
766
+ )
767
+
768
+ # concatenate for backwards comp
769
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
770
+
771
+ return prompt_embeds
772
+
773
+ def encode_prompt(
774
+ self,
775
+ prompt,
776
+ device,
777
+ num_images_per_prompt,
778
+ do_classifier_free_guidance,
779
+ negative_prompt=None,
780
+ prompt_embeds: Optional[torch.FloatTensor] = None,
781
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
782
+ lora_scale: Optional[float] = None,
783
+ clip_skip: Optional[int] = None,
784
+ ):
785
+ r"""
786
+ Encodes the prompt into text encoder hidden states.
787
+
788
+ Args:
789
+ prompt (`str` or `List[str]`, *optional*):
790
+ prompt to be encoded
791
+ device: (`torch.device`):
792
+ torch device
793
+ num_images_per_prompt (`int`):
794
+ number of images that should be generated per prompt
795
+ do_classifier_free_guidance (`bool`):
796
+ whether to use classifier free guidance or not
797
+ negative_prompt (`str` or `List[str]`, *optional*):
798
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
799
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
800
+ less than `1`).
801
+ prompt_embeds (`torch.FloatTensor`, *optional*):
802
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
803
+ provided, text embeddings will be generated from `prompt` input argument.
804
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
805
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
806
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
807
+ argument.
808
+ lora_scale (`float`, *optional*):
809
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
810
+ clip_skip (`int`, *optional*):
811
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
812
+ the output of the pre-final layer will be used for computing the prompt embeddings.
813
+ """
814
+ # set lora scale so that monkey patched LoRA
815
+ # function of text encoder can correctly access it
816
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
817
+ self._lora_scale = lora_scale
818
+
819
+ # dynamically adjust the LoRA scale
820
+ if not USE_PEFT_BACKEND:
821
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
822
+ else:
823
+ scale_lora_layers(self.text_encoder, lora_scale)
824
+
825
+ if prompt is not None and isinstance(prompt, str):
826
+ batch_size = 1
827
+ elif prompt is not None and isinstance(prompt, list):
828
+ batch_size = len(prompt)
829
+ else:
830
+ batch_size = prompt_embeds.shape[0]
831
+
832
+ if prompt_embeds is None:
833
+ # textual inversion: process multi-vector tokens if necessary
834
+ if isinstance(self, TextualInversionLoaderMixin):
835
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
836
+
837
+ text_inputs = self.tokenizer(
838
+ prompt,
839
+ padding="max_length",
840
+ max_length=self.tokenizer.model_max_length,
841
+ truncation=True,
842
+ return_tensors="pt",
843
+ )
844
+ text_input_ids = text_inputs.input_ids
845
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
846
+
847
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
848
+ text_input_ids, untruncated_ids
849
+ ):
850
+ removed_text = self.tokenizer.batch_decode(
851
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
852
+ )
853
+ logger.warning(
854
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
855
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
856
+ )
857
+
858
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
859
+ attention_mask = text_inputs.attention_mask.to(device)
860
+ else:
861
+ attention_mask = None
862
+
863
+ if clip_skip is None:
864
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
865
+ prompt_embeds = prompt_embeds[0]
866
+ else:
867
+ prompt_embeds = self.text_encoder(
868
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
869
+ )
870
+ # Access the `hidden_states` first, that contains a tuple of
871
+ # all the hidden states from the encoder layers. Then index into
872
+ # the tuple to access the hidden states from the desired layer.
873
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
874
+ # We also need to apply the final LayerNorm here to not mess with the
875
+ # representations. The `last_hidden_states` that we typically use for
876
+ # obtaining the final prompt representations passes through the LayerNorm
877
+ # layer.
878
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
879
+
880
+ if self.text_encoder is not None:
881
+ prompt_embeds_dtype = self.text_encoder.dtype
882
+ elif self.unet is not None:
883
+ prompt_embeds_dtype = self.unet.dtype
884
+ else:
885
+ prompt_embeds_dtype = prompt_embeds.dtype
886
+
887
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
888
+
889
+ bs_embed, seq_len, _ = prompt_embeds.shape
890
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
891
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
892
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
893
+
894
+ # get unconditional embeddings for classifier free guidance
895
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
896
+ uncond_tokens: List[str]
897
+ if negative_prompt is None:
898
+ uncond_tokens = [""] * batch_size
899
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
900
+ raise TypeError(
901
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
902
+ f" {type(prompt)}."
903
+ )
904
+ elif isinstance(negative_prompt, str):
905
+ uncond_tokens = [negative_prompt]
906
+ elif batch_size != len(negative_prompt):
907
+ raise ValueError(
908
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
909
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
910
+ " the batch size of `prompt`."
911
+ )
912
+ else:
913
+ uncond_tokens = negative_prompt
914
+
915
+ # textual inversion: process multi-vector tokens if necessary
916
+ if isinstance(self, TextualInversionLoaderMixin):
917
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
918
+
919
+ max_length = prompt_embeds.shape[1]
920
+ uncond_input = self.tokenizer(
921
+ uncond_tokens,
922
+ padding="max_length",
923
+ max_length=max_length,
924
+ truncation=True,
925
+ return_tensors="pt",
926
+ )
927
+
928
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
929
+ attention_mask = uncond_input.attention_mask.to(device)
930
+ else:
931
+ attention_mask = None
932
+
933
+ negative_prompt_embeds = self.text_encoder(
934
+ uncond_input.input_ids.to(device),
935
+ attention_mask=attention_mask,
936
+ )
937
+ negative_prompt_embeds = negative_prompt_embeds[0]
938
+
939
+ if do_classifier_free_guidance:
940
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
941
+ seq_len = negative_prompt_embeds.shape[1]
942
+
943
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
944
+
945
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
946
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
947
+
948
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
949
+ # Retrieve the original scale by scaling back the LoRA layers
950
+ unscale_lora_layers(self.text_encoder, lora_scale)
951
+
952
+ return prompt_embeds, negative_prompt_embeds
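# Usage sketch (illustration, not part of the file): `encode_prompt` returns a
# (prompt_embeds, negative_prompt_embeds) tuple; for classifier-free guidance the two are
# concatenated with the unconditional embeddings first, exactly as __call__ does further down.
# `pipe` is an assumed, already-loaded instance of this pipeline.
#
#   import torch
#
#   prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
#       prompt="an astronaut riding a horse",
#       device=pipe._execution_device,
#       num_images_per_prompt=1,
#       do_classifier_free_guidance=True,
#       negative_prompt="low quality",
#   )
#   prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])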
953
+
954
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
955
+ dtype = next(self.image_encoder.parameters()).dtype
956
+
957
+ if not isinstance(image, torch.Tensor):
958
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
959
+
960
+ image = image.to(device=device, dtype=dtype)
961
+ if output_hidden_states:
962
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
963
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
964
+ uncond_image_enc_hidden_states = self.image_encoder(
965
+ torch.zeros_like(image), output_hidden_states=True
966
+ ).hidden_states[-2]
967
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
968
+ num_images_per_prompt, dim=0
969
+ )
970
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
971
+ else:
972
+ image_embeds = self.image_encoder(image).image_embeds
973
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
974
+ uncond_image_embeds = torch.zeros_like(image_embeds)
975
+
976
+ return image_embeds, uncond_image_embeds
977
+
978
+ def run_safety_checker(self, image, device, dtype):
979
+ if self.safety_checker is None:
980
+ has_nsfw_concept = None
981
+ else:
982
+ if torch.is_tensor(image):
983
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
984
+ else:
985
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
986
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
987
+ image, has_nsfw_concept = self.safety_checker(
988
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
989
+ )
990
+ return image, has_nsfw_concept
991
+
992
+ def decode_latents(self, latents):
993
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
994
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
995
+
996
+ latents = 1 / self.vae.config.scaling_factor * latents
997
+ image = self.vae.decode(latents, return_dict=False)[0]
998
+ image = (image / 2 + 0.5).clamp(0, 1)
999
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
1000
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
1001
+ return image
1002
+
1003
+ def prepare_extra_step_kwargs(self, generator, eta):
1004
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
1005
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
1006
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
1007
+ # and should be between [0, 1]
1008
+
1009
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
1010
+ extra_step_kwargs = {}
1011
+ if accepts_eta:
1012
+ extra_step_kwargs["eta"] = eta
1013
+
1014
+ # check if the scheduler accepts generator
1015
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
1016
+ if accepts_generator:
1017
+ extra_step_kwargs["generator"] = generator
1018
+ return extra_step_kwargs
1019
+
1020
+ def check_inputs(
1021
+ self,
1022
+ prompt,
1023
+ height,
1024
+ width,
1025
+ callback_steps,
1026
+ negative_prompt=None,
1027
+ prompt_embeds=None,
1028
+ negative_prompt_embeds=None,
1029
+ callback_on_step_end_tensor_inputs=None,
1030
+ ):
1031
+ if height % 8 != 0 or width % 8 != 0:
1032
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
1033
+
1034
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
1035
+ raise ValueError(
1036
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
1037
+ f" {type(callback_steps)}."
1038
+ )
1039
+ if callback_on_step_end_tensor_inputs is not None and not all(
1040
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
1041
+ ):
1042
+ raise ValueError(
1043
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
1044
+ )
1045
+
1046
+ if prompt is not None and prompt_embeds is not None:
1047
+ raise ValueError(
1048
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
1049
+ " only forward one of the two."
1050
+ )
1051
+ elif prompt is None and prompt_embeds is None:
1052
+ raise ValueError(
1053
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
1054
+ )
1055
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
1056
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
1057
+
1058
+ if negative_prompt is not None and negative_prompt_embeds is not None:
1059
+ raise ValueError(
1060
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
1061
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
1062
+ )
1063
+
1064
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
1065
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
1066
+ raise ValueError(
1067
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
1068
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
1069
+ f" {negative_prompt_embeds.shape}."
1070
+ )
1071
+
1072
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
1073
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
1074
+ if isinstance(generator, list) and len(generator) != batch_size:
1075
+ raise ValueError(
1076
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
1077
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
1078
+ )
1079
+
1080
+ if latents is None:
1081
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1082
+ else:
1083
+ latents = latents.to(device)
1084
+
1085
+ # scale the initial noise by the standard deviation required by the scheduler
1086
+ latents = latents * self.scheduler.init_noise_sigma
1087
+ return latents
1088
+
1089
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
1090
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
1091
+
1092
+ The suffixes after the scaling factors represent the stages where they are being applied.
1093
+
1094
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
1095
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
1096
+
1097
+ Args:
1098
+ s1 (`float`):
1099
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
1100
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1101
+ s2 (`float`):
1102
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
1103
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1104
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
1105
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
1106
+ """
1107
+ if not hasattr(self, "unet"):
1108
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
1109
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
1110
+
1111
+ def disable_freeu(self):
1112
+ """Disables the FreeU mechanism if enabled."""
1113
+ self.unet.disable_freeu()
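# Usage sketch (illustration, not part of the file): FreeU is switched on with four scaling
# factors; the values below are the ones the FreeU authors suggest as a starting point for
# Stable Diffusion v1.x checkpoints, and `pipe` is an assumed pipeline instance.
#
#   pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
#   ...generate images...
#   pipe.disable_freeu()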
1114
+
1115
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.fuse_qkv_projections
1116
+ def fuse_qkv_projections(self, unet: bool = True, vae: bool = True):
1117
+ """
1118
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
1119
+ key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
1120
+
1121
+ <Tip warning={true}>
1122
+
1123
+ This API is 🧪 experimental.
1124
+
1125
+ </Tip>
1126
+
1127
+ Args:
1128
+ unet (`bool`, defaults to `True`): To apply fusion on the UNet.
1129
+ vae (`bool`, defaults to `True`): To apply fusion on the VAE.
1130
+ """
1131
+ self.fusing_unet = False
1132
+ self.fusing_vae = False
1133
+
1134
+ if unet:
1135
+ self.fusing_unet = True
1136
+ self.unet.fuse_qkv_projections()
1137
+ self.unet.set_attn_processor(FusedAttnProcessor2_0())
1138
+
1139
+ if vae:
1140
+ if not isinstance(self.vae, AutoencoderKL):
1141
+ raise ValueError("`fuse_qkv_projections()` is only supported for the VAE of type `AutoencoderKL`.")
1142
+
1143
+ self.fusing_vae = True
1144
+ self.vae.fuse_qkv_projections()
1145
+ self.vae.set_attn_processor(FusedAttnProcessor2_0())
1146
+
1147
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.unfuse_qkv_projections
1148
+ def unfuse_qkv_projections(self, unet: bool = True, vae: bool = True):
1149
+ """Disable QKV projection fusion if enabled.
1150
+
1151
+ <Tip warning={true}>
1152
+
1153
+ This API is 🧪 experimental.
1154
+
1155
+ </Tip>
1156
+
1157
+ Args:
1158
+ unet (`bool`, defaults to `True`): To apply fusion on the UNet.
1159
+ vae (`bool`, defaults to `True`): To apply fusion on the VAE.
1160
+
1161
+ """
1162
+ if unet:
1163
+ if not self.fusing_unet:
1164
+ logger.warning("The UNet was not initially fused for QKV projections. Doing nothing.")
1165
+ else:
1166
+ self.unet.unfuse_qkv_projections()
1167
+ self.fusing_unet = False
1168
+
1169
+ if vae:
1170
+ if not self.fusing_vae:
1171
+ logger.warning("The VAE was not initially fused for QKV projections. Doing nothing.")
1172
+ else:
1173
+ self.vae.unfuse_qkv_projections()
1174
+ self.fusing_vae = False
1175
+
1176
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
1177
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
1178
+ """
1179
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
1180
+
1181
+ Args:
1182
+ timesteps (`torch.Tensor`):
1183
+ generate embedding vectors at these timesteps
1184
+ embedding_dim (`int`, *optional*, defaults to 512):
1185
+ dimension of the embeddings to generate
1186
+ dtype:
1187
+ data type of the generated embeddings
1188
+
1189
+ Returns:
1190
+ `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
1191
+ """
1192
+ assert len(w.shape) == 1
1193
+ w = w * 1000.0
1194
+
1195
+ half_dim = embedding_dim // 2
1196
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
1197
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
1198
+ emb = w.to(dtype)[:, None] * emb[None, :]
1199
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
1200
+ if embedding_dim % 2 == 1: # zero pad
1201
+ emb = torch.nn.functional.pad(emb, (0, 1))
1202
+ assert emb.shape == (w.shape[0], embedding_dim)
1203
+ return emb
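# Usage sketch (illustration, not part of the file): for a 1-D tensor of guidance weights the
# method returns a (batch, embedding_dim) sinusoidal embedding, which __call__ passes to the
# UNet as `timestep_cond` when `time_cond_proj_dim` is configured. `pipe` is assumed.
#
#   import torch
#
#   w = torch.tensor([7.5 - 1.0])  # guidance_scale - 1, as done in __call__
#   timestep_cond = pipe.get_guidance_scale_embedding(w, embedding_dim=256)
#   assert timestep_cond.shape == (1, 256)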
1204
+
1205
+ @property
1206
+ def guidance_scale(self):
1207
+ return self._guidance_scale
1208
+
1209
+ @property
1210
+ def guidance_rescale(self):
1211
+ return self._guidance_rescale
1212
+
1213
+ @property
1214
+ def clip_skip(self):
1215
+ return self._clip_skip
1216
+
1217
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1218
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1219
+ # corresponds to doing no classifier free guidance.
1220
+ @property
1221
+ def do_classifier_free_guidance(self):
1222
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
1223
+
1224
+ @property
1225
+ def cross_attention_kwargs(self):
1226
+ return self._cross_attention_kwargs
1227
+
1228
+ @property
1229
+ def num_timesteps(self):
1230
+ return self._num_timesteps
1231
+
1232
+ @property
1233
+ def interrupt(self):
1234
+ return self._interrupt
1235
+
1236
+ @torch.no_grad()
1237
+ def __call__(
1238
+ self,
1239
+ prompt: Union[str, List[str]] = None,
1240
+ height: Optional[int] = None,
1241
+ width: Optional[int] = None,
1242
+ num_inference_steps: int = 50,
1243
+ timesteps: List[int] = None,
1244
+ guidance_scale: float = 7.5,
1245
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1246
+ num_images_per_prompt: Optional[int] = 1,
1247
+ eta: float = 0.0,
1248
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1249
+ latents: Optional[torch.FloatTensor] = None,
1250
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1251
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1252
+ image_embeds: Optional[torch.FloatTensor] = None,
1253
+ output_type: Optional[str] = "pil",
1254
+ return_dict: bool = True,
1255
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1256
+ guidance_rescale: float = 0.0,
1257
+ clip_skip: Optional[int] = None,
1258
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
1259
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
1260
+ **kwargs,
1261
+ ):
1262
+ r"""
1263
+ The call function to the pipeline for generation.
1264
+
1265
+ Args:
1266
+ prompt (`str` or `List[str]`, *optional*):
1267
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
1268
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1269
+ The height in pixels of the generated image.
1270
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
1271
+ The width in pixels of the generated image.
1272
+ num_inference_steps (`int`, *optional*, defaults to 50):
1273
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1274
+ expense of slower inference.
1275
+ timesteps (`List[int]`, *optional*):
1276
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
1277
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
1278
+ passed will be used. Must be in descending order.
1279
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1280
+ A higher guidance scale value encourages the model to generate images closely linked to the text
1281
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
1282
+ negative_prompt (`str` or `List[str]`, *optional*):
1283
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
1284
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
1285
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1286
+ The number of images to generate per prompt.
1287
+ eta (`float`, *optional*, defaults to 0.0):
1288
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
1289
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
1290
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1291
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
1292
+ generation deterministic.
1293
+ latents (`torch.FloatTensor`, *optional*):
1294
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
1295
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1296
+ tensor is generated by sampling using the supplied random `generator`.
1297
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1298
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
1299
+ provided, text embeddings are generated from the `prompt` input argument.
1300
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1301
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
1302
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
1303
+ image_embeds (`torch.FloatTensor`, *optional*):
1304
+ Pre-generated image embeddings.
1305
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
1306
+ output_type (`str`, *optional*, defaults to `"pil"`):
1307
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
1308
+ return_dict (`bool`, *optional*, defaults to `True`):
1309
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1310
+ plain tuple.
1311
+ cross_attention_kwargs (`dict`, *optional*):
1312
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
1313
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1314
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
1315
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
1316
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
1317
+ using zero terminal SNR.
1318
+ clip_skip (`int`, *optional*):
1319
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1320
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1321
+ callback_on_step_end (`Callable`, *optional*):
1322
+ A function that calls at the end of each denoising steps during the inference. The function is called
1323
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1324
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1325
+ `callback_on_step_end_tensor_inputs`.
1326
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1327
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1328
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1329
+ `._callback_tensor_inputs` attribute of your pipeline class.
1330
+
1331
+ Examples:
1332
+
1333
+ Returns:
1334
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1335
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
1336
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
1337
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
1338
+ "not-safe-for-work" (nsfw) content.
1339
+ """
1340
+
1341
+ callback = kwargs.pop("callback", None)
1342
+ callback_steps = kwargs.pop("callback_steps", None)
1343
+
1344
+ if callback is not None:
1345
+ deprecate(
1346
+ "callback",
1347
+ "1.0.0",
1348
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1349
+ )
1350
+ if callback_steps is not None:
1351
+ deprecate(
1352
+ "callback_steps",
1353
+ "1.0.0",
1354
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
1355
+ )
1356
+
1357
+ # 0. Default height and width to unet
1358
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
1359
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
1360
+ # to deal with lora scaling and other possible forward hooks
1361
+
1362
+ # 1. Check inputs. Raise error if not correct
1363
+ self.check_inputs(
1364
+ prompt,
1365
+ height,
1366
+ width,
1367
+ callback_steps,
1368
+ negative_prompt,
1369
+ prompt_embeds,
1370
+ negative_prompt_embeds,
1371
+ callback_on_step_end_tensor_inputs,
1372
+ )
1373
+
1374
+ self._guidance_scale = guidance_scale
1375
+ self._guidance_rescale = guidance_rescale
1376
+ self._clip_skip = clip_skip
1377
+ self._cross_attention_kwargs = cross_attention_kwargs
1378
+ self._interrupt = False
1379
+
1380
+ # 2. Define call parameters
1381
+ if prompt is not None and isinstance(prompt, str):
1382
+ batch_size = 1
1383
+ elif prompt is not None and isinstance(prompt, list):
1384
+ batch_size = len(prompt)
1385
+ else:
1386
+ batch_size = prompt_embeds.shape[0]
1387
+
1388
+ device = self._execution_device
1389
+
1390
+ # 3. Encode input prompt
1391
+ lora_scale = (
1392
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1393
+ )
1394
+
1395
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
1396
+ prompt,
1397
+ device,
1398
+ num_images_per_prompt,
1399
+ self.do_classifier_free_guidance,
1400
+ negative_prompt,
1401
+ prompt_embeds=prompt_embeds,
1402
+ negative_prompt_embeds=negative_prompt_embeds,
1403
+ lora_scale=lora_scale,
1404
+ clip_skip=self.clip_skip,
1405
+ )
1406
+
1407
+ # For classifier free guidance, we need to do two forward passes.
1408
+ # Here we concatenate the unconditional and text embeddings into a single batch
1409
+ # to avoid doing two forward passes
1410
+ if self.do_classifier_free_guidance:
1411
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
1412
+
1413
+ if image_embeds is not None:
1414
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0).to(
1415
+ device=device, dtype=prompt_embeds.dtype
1416
+ )
1417
+ negative_image_embeds = torch.zeros_like(image_embeds)
1418
+ if self.do_classifier_free_guidance:
1419
+ image_embeds = torch.cat([negative_image_embeds, image_embeds])
1420
+
1421
+ # 4. Prepare timesteps
1422
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1423
+
1424
+ # 5. Prepare latent variables
1425
+ num_channels_latents = self.unet.config.in_channels
1426
+ latents = self.prepare_latents(
1427
+ batch_size * num_images_per_prompt,
1428
+ num_channels_latents,
1429
+ height,
1430
+ width,
1431
+ prompt_embeds.dtype,
1432
+ device,
1433
+ generator,
1434
+ latents,
1435
+ )
1436
+
1437
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1438
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1439
+
1440
+ # 6.1 Add image embeds for IP-Adapter
1441
+ added_cond_kwargs = {"image_embeds": image_embeds} if image_embeds is not None else None
1442
+
1443
+ # 6.2 Optionally get Guidance Scale Embedding
1444
+ timestep_cond = None
1445
+ if self.unet.config.time_cond_proj_dim is not None:
1446
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1447
+ timestep_cond = self.get_guidance_scale_embedding(
1448
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1449
+ ).to(device=device, dtype=latents.dtype)
1450
+
1451
+ # 7. Denoising loop
1452
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1453
+ self._num_timesteps = len(timesteps)
1454
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1455
+ for i, t in enumerate(timesteps):
1456
+ if self.interrupt:
1457
+ continue
1458
+
1459
+ # expand the latents if we are doing classifier free guidance
1460
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1461
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1462
+
1463
+ # predict the noise residual
1464
+ noise_pred = self.unet(
1465
+ latent_model_input,
1466
+ t,
1467
+ encoder_hidden_states=prompt_embeds,
1468
+ timestep_cond=timestep_cond,
1469
+ cross_attention_kwargs=self.cross_attention_kwargs,
1470
+ added_cond_kwargs=added_cond_kwargs,
1471
+ return_dict=False,
1472
+ )[0]
1473
+
1474
+ # perform guidance
1475
+ if self.do_classifier_free_guidance:
1476
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1477
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1478
+
1479
+ if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1480
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1481
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1482
+
1483
+ # compute the previous noisy sample x_t -> x_t-1
1484
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1485
+
1486
+ if callback_on_step_end is not None:
1487
+ callback_kwargs = {}
1488
+ for k in callback_on_step_end_tensor_inputs:
1489
+ callback_kwargs[k] = locals()[k]
1490
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1491
+
1492
+ latents = callback_outputs.pop("latents", latents)
1493
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1494
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1495
+
1496
+ # call the callback, if provided
1497
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1498
+ progress_bar.update()
1499
+ if callback is not None and i % callback_steps == 0:
1500
+ step_idx = i // getattr(self.scheduler, "order", 1)
1501
+ callback(step_idx, t, latents)
1502
+
1503
+ if not output_type == "latent":
1504
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1505
+ 0
1506
+ ]
1507
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1508
+ else:
1509
+ image = latents
1510
+ has_nsfw_concept = None
1511
+
1512
+ if has_nsfw_concept is None:
1513
+ do_denormalize = [True] * image.shape[0]
1514
+ else:
1515
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1516
+
1517
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1518
+
1519
+ # Offload all models
1520
+ self.maybe_free_model_hooks()
1521
+
1522
+ if not return_dict:
1523
+ return (image, has_nsfw_concept)
1524
+
1525
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
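# Usage sketch (illustration, not part of the diff): loading and calling a community pipeline
# with the __call__ signature shown above. The base checkpoint and the `custom_pipeline`
# identifier are assumptions made for the example; adjust them to the pipeline you actually use.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",        # example base checkpoint
    custom_pipeline="ip_adapter_face_id",    # assumed name of the community pipeline in this file
    torch_dtype=torch.float16,
).to("cuda")

result = pipe(
    prompt="portrait photo of a person in a garden, natural light",
    num_inference_steps=30,
    guidance_scale=7.5,
    # image_embeds=precomputed_face_embeds,  # optional, see the `image_embeds` docstring above
)
result.images[0].save("out.png")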
v0.26.3/latent_consistency_img2img.py ADDED
@@ -0,0 +1,827 @@
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import PIL.Image
24
+ import torch
25
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
26
+
27
+ from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
28
+ from diffusers.configuration_utils import register_to_config
29
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
30
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
31
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
32
+ from diffusers.utils import BaseOutput
33
+ from diffusers.utils.torch_utils import randn_tensor
34
+
35
+
36
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
+
38
+
39
+ class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):
40
+ _optional_components = ["scheduler"]
41
+
42
+ def __init__(
43
+ self,
44
+ vae: AutoencoderKL,
45
+ text_encoder: CLIPTextModel,
46
+ tokenizer: CLIPTokenizer,
47
+ unet: UNet2DConditionModel,
48
+ scheduler: "LCMSchedulerWithTimestamp",
49
+ safety_checker: StableDiffusionSafetyChecker,
50
+ feature_extractor: CLIPImageProcessor,
51
+ requires_safety_checker: bool = True,
52
+ ):
53
+ super().__init__()
54
+
55
+ scheduler = (
56
+ scheduler
57
+ if scheduler is not None
58
+ else LCMSchedulerWithTimestamp(
59
+ beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
60
+ )
61
+ )
62
+
63
+ self.register_modules(
64
+ vae=vae,
65
+ text_encoder=text_encoder,
66
+ tokenizer=tokenizer,
67
+ unet=unet,
68
+ scheduler=scheduler,
69
+ safety_checker=safety_checker,
70
+ feature_extractor=feature_extractor,
71
+ )
72
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
73
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
74
+
75
+ def _encode_prompt(
76
+ self,
77
+ prompt,
78
+ device,
79
+ num_images_per_prompt,
80
+ prompt_embeds: Optional[torch.FloatTensor] = None,
81
+ ):
82
+ r"""
83
+ Encodes the prompt into text encoder hidden states.
84
+ Args:
85
+ prompt (`str` or `List[str]`, *optional*):
86
+ prompt to be encoded
87
+ device: (`torch.device`):
88
+ torch device
89
+ num_images_per_prompt (`int`):
90
+ number of images that should be generated per prompt
91
+ prompt_embeds (`torch.FloatTensor`, *optional*):
92
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
93
+ provided, text embeddings will be generated from `prompt` input argument.
94
+ """
95
+
96
+ if prompt is not None and isinstance(prompt, str):
97
+ pass
98
+ elif prompt is not None and isinstance(prompt, list):
99
+ len(prompt)
100
+ else:
101
+ prompt_embeds.shape[0]
102
+
103
+ if prompt_embeds is None:
104
+ text_inputs = self.tokenizer(
105
+ prompt,
106
+ padding="max_length",
107
+ max_length=self.tokenizer.model_max_length,
108
+ truncation=True,
109
+ return_tensors="pt",
110
+ )
111
+ text_input_ids = text_inputs.input_ids
112
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
113
+
114
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
115
+ text_input_ids, untruncated_ids
116
+ ):
117
+ removed_text = self.tokenizer.batch_decode(
118
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
119
+ )
120
+ logger.warning(
121
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
122
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
123
+ )
124
+
125
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
126
+ attention_mask = text_inputs.attention_mask.to(device)
127
+ else:
128
+ attention_mask = None
129
+
130
+ prompt_embeds = self.text_encoder(
131
+ text_input_ids.to(device),
132
+ attention_mask=attention_mask,
133
+ )
134
+ prompt_embeds = prompt_embeds[0]
135
+
136
+ if self.text_encoder is not None:
137
+ prompt_embeds_dtype = self.text_encoder.dtype
138
+ elif self.unet is not None:
139
+ prompt_embeds_dtype = self.unet.dtype
140
+ else:
141
+ prompt_embeds_dtype = prompt_embeds.dtype
142
+
143
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
144
+
145
+ bs_embed, seq_len, _ = prompt_embeds.shape
146
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
147
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
148
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
149
+
150
+ # Don't need to get uncond prompt embedding because of LCM Guided Distillation
151
+ return prompt_embeds
152
+
153
+ def run_safety_checker(self, image, device, dtype):
154
+ if self.safety_checker is None:
155
+ has_nsfw_concept = None
156
+ else:
157
+ if torch.is_tensor(image):
158
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
159
+ else:
160
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
161
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
162
+ image, has_nsfw_concept = self.safety_checker(
163
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
164
+ )
165
+ return image, has_nsfw_concept
166
+
167
+ def prepare_latents(
168
+ self,
169
+ image,
170
+ timestep,
171
+ batch_size,
172
+ num_channels_latents,
173
+ height,
174
+ width,
175
+ dtype,
176
+ device,
177
+ latents=None,
178
+ generator=None,
179
+ ):
180
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
181
+
182
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
183
+ raise ValueError(
184
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
185
+ )
186
+
187
+ image = image.to(device=device, dtype=dtype)
188
+
189
+ # batch_size = batch_size * num_images_per_prompt
190
+
191
+ if image.shape[1] == 4:
192
+ init_latents = image
193
+
194
+ else:
195
+ if isinstance(generator, list) and len(generator) != batch_size:
196
+ raise ValueError(
197
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
198
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
199
+ )
200
+
201
+ elif isinstance(generator, list):
202
+ init_latents = [
203
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
204
+ ]
205
+ init_latents = torch.cat(init_latents, dim=0)
206
+ else:
207
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
208
+
209
+ init_latents = self.vae.config.scaling_factor * init_latents
210
+
211
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
212
+ # expand init_latents for batch_size
213
+ (
214
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
215
+ " images (`image`). Initial images are now duplicated to match the number of text prompts. Note"
216
+ " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
217
+ " your script to pass as many initial images as text prompts to suppress this warning."
218
+ )
219
+ # deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
220
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
221
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
222
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
223
+ raise ValueError(
224
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
225
+ )
226
+ else:
227
+ init_latents = torch.cat([init_latents], dim=0)
228
+
229
+ shape = init_latents.shape
230
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
231
+
232
+ # get latents
233
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
234
+ latents = init_latents
235
+
236
+ return latents
237
+
238
+ if latents is None:
239
+ latents = torch.randn(shape, dtype=dtype).to(device)
240
+ else:
241
+ latents = latents.to(device)
242
+ # scale the initial noise by the standard deviation required by the scheduler
243
+ latents = latents * self.scheduler.init_noise_sigma
244
+ return latents
245
+
246
+ def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
247
+ """
248
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
249
+ Args:
250
+ timesteps: torch.Tensor: generate embedding vectors at these timesteps
251
+ embedding_dim: int: dimension of the embeddings to generate
252
+ dtype: data type of the generated embeddings
253
+ Returns:
254
+ embedding vectors with shape `(len(timesteps), embedding_dim)`
255
+ """
256
+ assert len(w.shape) == 1
257
+ w = w * 1000.0
258
+
259
+ half_dim = embedding_dim // 2
260
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
261
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
262
+ emb = w.to(dtype)[:, None] * emb[None, :]
263
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
264
+ if embedding_dim % 2 == 1: # zero pad
265
+ emb = torch.nn.functional.pad(emb, (0, 1))
266
+ assert emb.shape == (w.shape[0], embedding_dim)
267
+ return emb
268
+
269
+ def get_timesteps(self, num_inference_steps, strength, device):
270
+ # get the original timestep using init_timestep
271
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
272
+
273
+ t_start = max(num_inference_steps - init_timestep, 0)
274
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
275
+
276
+ return timesteps, num_inference_steps - t_start
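# Worked example (illustration, not part of the file): the strength/timestep arithmetic above,
# evaluated standalone for num_inference_steps=4 and strength=0.8.
num_inference_steps, strength = 4, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # -> 3
t_start = max(num_inference_steps - init_timestep, 0)                          # -> 1
# With a first-order scheduler, the last 3 of the 4 scheduled timesteps are run, starting
# from a partially noised version of the input image.
print(init_timestep, t_start)  # 3 1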
277
+
278
+ @torch.no_grad()
279
+ def __call__(
280
+ self,
281
+ prompt: Union[str, List[str]] = None,
282
+ image: PipelineImageInput = None,
283
+ strength: float = 0.8,
284
+ height: Optional[int] = 768,
285
+ width: Optional[int] = 768,
286
+ guidance_scale: float = 7.5,
287
+ num_images_per_prompt: Optional[int] = 1,
288
+ latents: Optional[torch.FloatTensor] = None,
289
+ num_inference_steps: int = 4,
290
+ lcm_origin_steps: int = 50,
291
+ prompt_embeds: Optional[torch.FloatTensor] = None,
292
+ output_type: Optional[str] = "pil",
293
+ return_dict: bool = True,
294
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
295
+ ):
296
+ # 0. Default height and width to unet
297
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
298
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
299
+
300
+ # 2. Define call parameters
301
+ if prompt is not None and isinstance(prompt, str):
302
+ batch_size = 1
303
+ elif prompt is not None and isinstance(prompt, list):
304
+ batch_size = len(prompt)
305
+ else:
306
+ batch_size = prompt_embeds.shape[0]
307
+
308
+ device = self._execution_device
309
+ # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
310
+
311
+ # 3. Encode input prompt
312
+ prompt_embeds = self._encode_prompt(
313
+ prompt,
314
+ device,
315
+ num_images_per_prompt,
316
+ prompt_embeds=prompt_embeds,
317
+ )
318
+
319
+ # 3.5 encode image
320
+ image = self.image_processor.preprocess(image)
321
+
322
+ # 4. Prepare timesteps
323
+ self.scheduler.set_timesteps(strength, num_inference_steps, lcm_origin_steps)
324
+ # timesteps = self.scheduler.timesteps
325
+ # timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)
326
+ timesteps = self.scheduler.timesteps
327
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
328
+
329
+ print("timesteps: ", timesteps)
330
+
331
+ # 5. Prepare latent variable
332
+ num_channels_latents = self.unet.config.in_channels
333
+ latents = self.prepare_latents(
334
+ image,
335
+ latent_timestep,
336
+ batch_size * num_images_per_prompt,
337
+ num_channels_latents,
338
+ height,
339
+ width,
340
+ prompt_embeds.dtype,
341
+ device,
342
+ latents,
343
+ )
344
+ bs = batch_size * num_images_per_prompt
345
+
346
+ # 6. Get Guidance Scale Embedding
347
+ w = torch.tensor(guidance_scale).repeat(bs)
348
+ w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
349
+
350
+ # 7. LCM MultiStep Sampling Loop:
351
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
352
+ for i, t in enumerate(timesteps):
353
+ ts = torch.full((bs,), t, device=device, dtype=torch.long)
354
+ latents = latents.to(prompt_embeds.dtype)
355
+
356
+ # model prediction (v-prediction, eps, x)
357
+ model_pred = self.unet(
358
+ latents,
359
+ ts,
360
+ timestep_cond=w_embedding,
361
+ encoder_hidden_states=prompt_embeds,
362
+ cross_attention_kwargs=cross_attention_kwargs,
363
+ return_dict=False,
364
+ )[0]
365
+
366
+ # compute the previous noisy sample x_t -> x_t-1
367
+ latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
368
+
369
+ # # call the callback, if provided
370
+ # if i == len(timesteps) - 1:
371
+ progress_bar.update()
372
+
373
+ denoised = denoised.to(prompt_embeds.dtype)
374
+ if not output_type == "latent":
375
+ image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
376
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
377
+ else:
378
+ image = denoised
379
+ has_nsfw_concept = None
380
+
381
+ if has_nsfw_concept is None:
382
+ do_denormalize = [True] * image.shape[0]
383
+ else:
384
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
385
+
386
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
387
+
388
+ if not return_dict:
389
+ return (image, has_nsfw_concept)
390
+
391
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
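# Usage sketch (illustration, not part of the diff): running this img2img pipeline. The
# checkpoint and the `custom_pipeline` identifier are assumptions for the example; LCM
# checkpoints such as "SimianLuo/LCM_Dreamshaper_v7" are commonly paired with it.
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_img2img",  # assumed community pipeline name
).to("cuda")

init_image = load_image("init.png")  # any local path or URL
images = pipe(
    prompt="a watercolor painting of a mountain lake",
    image=init_image,
    strength=0.8,
    num_inference_steps=4,   # LCM needs only a handful of steps
    guidance_scale=8.0,
).images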
392
+
393
+
394
+ @dataclass
395
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
396
+ class LCMSchedulerOutput(BaseOutput):
397
+ """
398
+ Output class for the scheduler's `step` function output.
399
+ Args:
400
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
401
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
402
+ denoising loop.
403
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
404
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
405
+ `pred_original_sample` can be used to preview progress or for guidance.
406
+ """
407
+
408
+ prev_sample: torch.FloatTensor
409
+ denoised: Optional[torch.FloatTensor] = None
410
+
411
+
412
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
413
+ def betas_for_alpha_bar(
414
+ num_diffusion_timesteps,
415
+ max_beta=0.999,
416
+ alpha_transform_type="cosine",
417
+ ):
418
+ """
419
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
420
+ (1-beta) over time from t = [0,1].
421
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
422
+ to that part of the diffusion process.
423
+ Args:
424
+ num_diffusion_timesteps (`int`): the number of betas to produce.
425
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
426
+ prevent singularities.
427
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
428
+ Choose from `cosine` or `exp`
429
+ Returns:
430
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
431
+ """
432
+ if alpha_transform_type == "cosine":
433
+
434
+ def alpha_bar_fn(t):
435
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
436
+
437
+ elif alpha_transform_type == "exp":
438
+
439
+ def alpha_bar_fn(t):
440
+ return math.exp(t * -12.0)
441
+
442
+ else:
443
+ raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
444
+
445
+ betas = []
446
+ for i in range(num_diffusion_timesteps):
447
+ t1 = i / num_diffusion_timesteps
448
+ t2 = (i + 1) / num_diffusion_timesteps
449
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
450
+ return torch.tensor(betas, dtype=torch.float32)
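# Quick check (illustration, not part of the file): the helper above produces the Glide-style
# cosine ("squaredcos_cap_v2") beta schedule, with every value clipped at max_beta.
betas = betas_for_alpha_bar(1000)
assert betas.shape == (1000,)
assert float(betas.max()) <= 0.999 and float(betas.min()) > 0.0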
451
+
452
+
453
+ def rescale_zero_terminal_snr(betas):
454
+ """
455
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
456
+ Args:
457
+ betas (`torch.FloatTensor`):
458
+ the betas that the scheduler is being initialized with.
459
+ Returns:
460
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
461
+ """
462
+ # Convert betas to alphas_bar_sqrt
463
+ alphas = 1.0 - betas
464
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
465
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
466
+
467
+ # Store old values.
468
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
469
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
470
+
471
+ # Shift so the last timestep is zero.
472
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
473
+
474
+ # Scale so the first timestep is back to the old value.
475
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
476
+
477
+ # Convert alphas_bar_sqrt to betas
478
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
479
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
480
+ alphas = torch.cat([alphas_bar[0:1], alphas])
481
+ betas = 1 - alphas
482
+
483
+ return betas
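# Quick check (illustration, not part of the file): after rescaling, the cumulative product of
# (1 - beta) reaches (numerically) zero at the final step, i.e. the terminal SNR is zero.
import torch

betas = torch.linspace(0.0001, 0.02, 1000)
rescaled = rescale_zero_terminal_snr(betas)
alphas_cumprod = torch.cumprod(1.0 - rescaled, dim=0)
assert float(alphas_cumprod[-1]) < 1e-8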
484
+
485
+
486
+ class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin):
487
+ """
488
+ This class modifies LCMScheduler to add a timestamp argument to set_timesteps
489
+
490
+
491
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
492
+ non-Markovian guidance.
493
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
494
+ methods the library implements for all schedulers such as loading and saving.
495
+ Args:
496
+ num_train_timesteps (`int`, defaults to 1000):
497
+ The number of diffusion steps to train the model.
498
+ beta_start (`float`, defaults to 0.0001):
499
+ The starting `beta` value of inference.
500
+ beta_end (`float`, defaults to 0.02):
501
+ The final `beta` value.
502
+ beta_schedule (`str`, defaults to `"linear"`):
503
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
504
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
505
+ trained_betas (`np.ndarray`, *optional*):
506
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
507
+ clip_sample (`bool`, defaults to `True`):
508
+ Clip the predicted sample for numerical stability.
509
+ clip_sample_range (`float`, defaults to 1.0):
510
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
511
+ set_alpha_to_one (`bool`, defaults to `True`):
512
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
513
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
514
+ otherwise it uses the alpha value at step 0.
515
+ steps_offset (`int`, defaults to 0):
516
+ An offset added to the inference steps. You can use a combination of `offset=1` and
517
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
518
+ Diffusion.
519
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
520
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
521
+ `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
522
+ Video](https://imagen.research.google/video/paper.pdf) paper).
523
+ thresholding (`bool`, defaults to `False`):
524
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
525
+ as Stable Diffusion.
526
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
527
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
528
+ sample_max_value (`float`, defaults to 1.0):
529
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
530
+ timestep_spacing (`str`, defaults to `"leading"`):
531
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
532
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
533
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
534
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
535
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
536
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
537
+ """
538
+
539
+ # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
540
+ order = 1
541
+
542
+ @register_to_config
543
+ def __init__(
544
+ self,
545
+ num_train_timesteps: int = 1000,
546
+ beta_start: float = 0.0001,
547
+ beta_end: float = 0.02,
548
+ beta_schedule: str = "linear",
549
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
550
+ clip_sample: bool = True,
551
+ set_alpha_to_one: bool = True,
552
+ steps_offset: int = 0,
553
+ prediction_type: str = "epsilon",
554
+ thresholding: bool = False,
555
+ dynamic_thresholding_ratio: float = 0.995,
556
+ clip_sample_range: float = 1.0,
557
+ sample_max_value: float = 1.0,
558
+ timestep_spacing: str = "leading",
559
+ rescale_betas_zero_snr: bool = False,
560
+ ):
561
+ if trained_betas is not None:
562
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
563
+ elif beta_schedule == "linear":
564
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
565
+ elif beta_schedule == "scaled_linear":
566
+ # this schedule is very specific to the latent diffusion model.
567
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
568
+ elif beta_schedule == "squaredcos_cap_v2":
569
+ # Glide cosine schedule
570
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
571
+ else:
572
+ raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
573
+
574
+ # Rescale for zero SNR
575
+ if rescale_betas_zero_snr:
576
+ self.betas = rescale_zero_terminal_snr(self.betas)
577
+
578
+ self.alphas = 1.0 - self.betas
579
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
580
+
581
+ # At every step in ddim, we are looking into the previous alphas_cumprod
582
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
583
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
584
+ # whether we use the final alpha of the "non-previous" one.
585
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
586
+
587
+ # standard deviation of the initial noise distribution
588
+ self.init_noise_sigma = 1.0
589
+
590
+ # setable values
591
+ self.num_inference_steps = None
592
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
593
+
594
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
595
+ """
596
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
597
+ current timestep.
598
+ Args:
599
+ sample (`torch.FloatTensor`):
600
+ The input sample.
601
+ timestep (`int`, *optional*):
602
+ The current timestep in the diffusion chain.
603
+ Returns:
604
+ `torch.FloatTensor`:
605
+ A scaled input sample.
606
+ """
607
+ return sample
608
+
609
+ def _get_variance(self, timestep, prev_timestep):
610
+ alpha_prod_t = self.alphas_cumprod[timestep]
611
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
612
+ beta_prod_t = 1 - alpha_prod_t
613
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
614
+
615
+ variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
616
+
617
+ return variance
618
+
619
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
620
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
621
+ """
622
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
623
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
624
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
625
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
626
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
627
+ https://arxiv.org/abs/2205.11487
628
+ """
629
+ dtype = sample.dtype
630
+ batch_size, channels, height, width = sample.shape
631
+
632
+ if dtype not in (torch.float32, torch.float64):
633
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
634
+
635
+ # Flatten sample for doing quantile calculation along each image
636
+ sample = sample.reshape(batch_size, channels * height * width)
637
+
638
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
639
+
640
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
641
+ s = torch.clamp(
642
+ s, min=1, max=self.config.sample_max_value
643
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
644
+
645
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
646
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
647
+
648
+ sample = sample.reshape(batch_size, channels, height, width)
649
+ sample = sample.to(dtype)
650
+
651
+ return sample
652
+
653
+ def set_timesteps(
654
+ self, strength, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None
655
+ ):
656
+ """
657
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
658
+ Args:
659
+ num_inference_steps (`int`):
660
+ The number of diffusion steps used when generating samples with a pre-trained model.
661
+ """
662
+
663
+ if num_inference_steps > self.config.num_train_timesteps:
664
+ raise ValueError(
665
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
666
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
667
+ f" maximal {self.config.num_train_timesteps} timesteps."
668
+ )
669
+
670
+ self.num_inference_steps = num_inference_steps
671
+
672
+ # LCM Timesteps Setting: # Linear Spacing
673
+ c = self.config.num_train_timesteps // lcm_origin_steps
674
+ lcm_origin_timesteps = (
675
+ np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
676
+ ) # LCM Training Steps Schedule
677
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
678
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
679
+
680
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
681
+
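For intuition, here is the schedule this computation produces under typical settings (a worked sketch using the same arithmetic; the values are purely illustrative):

```py
import numpy as np

# Same arithmetic as set_timesteps, for num_train_timesteps=1000, lcm_origin_steps=50,
# strength=1.0 and num_inference_steps=4.
c = 1000 // 50                                                            # 20
lcm_origin_timesteps = np.asarray(range(1, int(50 * 1.0) + 1)) * c - 1    # 19, 39, ..., 999
skipping_step = len(lcm_origin_timesteps) // 4                            # 12
print(lcm_origin_timesteps[::-skipping_step][:4])                         # [999 759 519 279]
```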
682
+ def get_scalings_for_boundary_condition_discrete(self, t):
683
+ self.sigma_data = 0.5 # Default: 0.5
684
+
685
+ # By dividing by 0.1, this is almost a delta function at t=0.
686
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
687
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
688
+ return c_skip, c_out
689
+
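A quick look at what these scalings do at the extremes (a sketch using the same formulas): at `t = 0` they reduce to `c_skip = 1`, `c_out = 0`, which enforces the consistency-model boundary condition `f(x, 0) = x`, while at large `t` they flip so the output is dominated by the predicted `x_0`:

```py
sigma_data = 0.5  # same default as above

def scalings(t):
    c_skip = sigma_data**2 / ((t / 0.1) ** 2 + sigma_data**2)
    c_out = (t / 0.1) / ((t / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out

print(scalings(0))    # (1.0, 0.0)   -> the step returns the sample unchanged
print(scalings(999))  # (~2.5e-09, ~1.0) -> the step returns (almost) the predicted x_0
```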
690
+ def step(
691
+ self,
692
+ model_output: torch.FloatTensor,
693
+ timeindex: int,
694
+ timestep: int,
695
+ sample: torch.FloatTensor,
696
+ eta: float = 0.0,
697
+ use_clipped_model_output: bool = False,
698
+ generator=None,
699
+ variance_noise: Optional[torch.FloatTensor] = None,
700
+ return_dict: bool = True,
701
+ ) -> Union[LCMSchedulerOutput, Tuple]:
702
+ """
703
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
704
+ process from the learned model outputs (most often the predicted noise).
705
+ Args:
706
+ model_output (`torch.FloatTensor`):
707
+ The direct output from learned diffusion model.
708
+ timestep (`float`):
709
+ The current discrete timestep in the diffusion chain.
710
+ sample (`torch.FloatTensor`):
711
+ A current instance of a sample created by the diffusion process.
712
+ eta (`float`):
713
+ The weight of noise for added noise in diffusion step.
714
+ use_clipped_model_output (`bool`, defaults to `False`):
715
+ If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
716
+ because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
717
+ clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
718
+ `use_clipped_model_output` has no effect.
719
+ generator (`torch.Generator`, *optional*):
720
+ A random number generator.
721
+ variance_noise (`torch.FloatTensor`):
722
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
723
+ itself. Useful for methods such as [`CycleDiffusion`].
724
+ return_dict (`bool`, *optional*, defaults to `True`):
725
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
726
+ Returns:
727
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
728
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
729
+ tuple is returned where the first element is the sample tensor.
730
+ """
731
+ if self.num_inference_steps is None:
732
+ raise ValueError(
733
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
734
+ )
735
+
736
+ # 1. get previous step value
737
+ prev_timeindex = timeindex + 1
738
+ if prev_timeindex < len(self.timesteps):
739
+ prev_timestep = self.timesteps[prev_timeindex]
740
+ else:
741
+ prev_timestep = timestep
742
+
743
+ # 2. compute alphas, betas
744
+ alpha_prod_t = self.alphas_cumprod[timestep]
745
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
746
+
747
+ beta_prod_t = 1 - alpha_prod_t
748
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
749
+
750
+ # 3. Get scalings for boundary conditions
751
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
752
+
753
+ # 4. Different Parameterization:
754
+ parameterization = self.config.prediction_type
755
+
756
+ if parameterization == "epsilon": # noise-prediction
757
+ pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
758
+
759
+ elif parameterization == "sample": # x-prediction
760
+ pred_x0 = model_output
761
+
762
+ elif parameterization == "v_prediction": # v-prediction
763
+ pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
764
+
765
+ # 4. Denoise model output using boundary conditions
766
+ denoised = c_out * pred_x0 + c_skip * sample
767
+
768
+ # 5. Sample z ~ N(0, I), For MultiStep Inference
769
+ # Noise is not used for one-step sampling.
770
+ if len(self.timesteps) > 1:
771
+ noise = torch.randn(model_output.shape).to(model_output.device)
772
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
773
+ else:
774
+ prev_sample = denoised
775
+
776
+ if not return_dict:
777
+ return (prev_sample, denoised)
778
+
779
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
780
+
781
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
782
+ def add_noise(
783
+ self,
784
+ original_samples: torch.FloatTensor,
785
+ noise: torch.FloatTensor,
786
+ timesteps: torch.IntTensor,
787
+ ) -> torch.FloatTensor:
788
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
789
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
790
+ timesteps = timesteps.to(original_samples.device)
791
+
792
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
793
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
794
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
795
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
796
+
797
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
798
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
799
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
800
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
801
+
802
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
803
+ return noisy_samples
804
+
805
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
806
+ def get_velocity(
807
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
808
+ ) -> torch.FloatTensor:
809
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
810
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
811
+ timesteps = timesteps.to(sample.device)
812
+
813
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
814
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
815
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
816
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
817
+
818
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
819
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
820
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
821
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
822
+
823
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
824
+ return velocity
825
+
826
+ def __len__(self):
827
+ return self.config.num_train_timesteps
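A minimal end-to-end sketch of the scheduler defined above (hypothetical standalone usage, with a random tensor standing in for a real UNet prediction); the extra first argument to `set_timesteps` is the img2img strength that truncates the origin schedule:

```py
import torch

scheduler = LCMSchedulerWithTimestamp(num_train_timesteps=1000)
# strength=0.5 keeps only the first half of the 50-step origin schedule
scheduler.set_timesteps(0.5, num_inference_steps=4, lcm_origin_steps=50, device="cpu")
print(scheduler.timesteps)  # e.g. tensor([499, 379, 259, 139])

sample = torch.randn(1, 4, 64, 64)
for i, t in enumerate(scheduler.timesteps):
    model_output = torch.randn_like(sample)  # placeholder for a real UNet prediction
    sample, denoised = scheduler.step(model_output, i, t, sample, return_dict=False)
```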
v0.26.3/latent_consistency_interpolate.py ADDED
@@ -0,0 +1,1051 @@
1
+ import inspect
2
+ from typing import Any, Callable, Dict, List, Optional, Union
3
+
4
+ import numpy as np
5
+ import torch
6
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
7
+
8
+ from diffusers.image_processor import VaeImageProcessor
9
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
10
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
11
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
12
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
14
+ from diffusers.schedulers import LCMScheduler
15
+ from diffusers.utils import (
16
+ USE_PEFT_BACKEND,
17
+ deprecate,
18
+ logging,
19
+ replace_example_docstring,
20
+ scale_lora_layers,
21
+ unscale_lora_layers,
22
+ )
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
27
+
28
+ EXAMPLE_DOC_STRING = """
29
+ Examples:
30
+ ```py
31
+ >>> import torch
32
+ >>> import numpy as np
33
+
34
+ >>> from diffusers import DiffusionPipeline
35
+
36
+ >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_interpolate")
37
+ >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
38
+ >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32)
39
+
40
+ >>> prompts = ["A cat", "A dog", "A horse"]
41
+ >>> num_inference_steps = 4
42
+ >>> num_interpolation_steps = 24
43
+ >>> seed = 1337
44
+
45
+ >>> torch.manual_seed(seed)
46
+ >>> np.random.seed(seed)
47
+
48
+ >>> images = pipe(
49
+ prompt=prompts,
50
+ height=512,
51
+ width=512,
52
+ num_inference_steps=num_inference_steps,
53
+ num_interpolation_steps=num_interpolation_steps,
54
+ guidance_scale=8.0,
55
+ embedding_interpolation_type="lerp",
56
+ latent_interpolation_type="slerp",
57
+ process_batch_size=4, # Make it higher or lower based on your GPU memory
58
+ generator=torch.Generator().manual_seed(seed),
59
+ )
60
+
61
+ >>> # Save the images as a video
62
+ >>> import imageio
63
+ >>> from PIL import Image
64
+
65
+ >>> def pil_to_video(images: List[Image.Image], filename: str, fps: int = 60) -> None:
66
+ frames = [np.array(image) for image in images]
67
+ with imageio.get_writer(filename, fps=fps) as video_writer:
68
+ for frame in frames:
69
+ video_writer.append_data(frame)
70
+
71
+ >>> pil_to_video(images, "lcm_interpolate.mp4", fps=24)
72
+ ```
73
+ """
74
+
75
+
76
+ def lerp(
77
+ v0: Union[torch.Tensor, np.ndarray],
78
+ v1: Union[torch.Tensor, np.ndarray],
79
+ t: Union[float, torch.Tensor, np.ndarray],
80
+ ) -> Union[torch.Tensor, np.ndarray]:
81
+ """
82
+ Linearly interpolate between two vectors/tensors.
83
+
84
+ Args:
85
+ v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
86
+ v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
87
+ t: (`float`, `torch.Tensor`, or `np.ndarray`):
88
+ Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
89
+ torch.Tensor, must be one dimensional with values between 0 and 1.
90
+
91
+ Returns:
92
+ Union[torch.Tensor, np.ndarray]
93
+ Interpolated vector/tensor between v0 and v1.
94
+ """
95
+ inputs_are_torch = False
96
+ t_is_float = False
97
+
98
+ if isinstance(v0, torch.Tensor):
99
+ inputs_are_torch = True
100
+ input_device = v0.device
101
+ v0 = v0.cpu().numpy()
102
+ v1 = v1.cpu().numpy()
103
+
104
+ if isinstance(t, torch.Tensor):
105
+ inputs_are_torch = True
106
+ input_device = t.device
107
+ t = t.cpu().numpy()
108
+ elif isinstance(t, float):
109
+ t_is_float = True
110
+ t = np.array([t])
111
+
112
+ t = t[..., None]
113
+ v0 = v0[None, ...]
114
+ v1 = v1[None, ...]
115
+ v2 = (1 - t) * v0 + t * v1
116
+
117
+ if t_is_float and v0.ndim > 1:
118
+ assert v2.shape[0] == 1
119
+ v2 = np.squeeze(v2, axis=0)
120
+ if inputs_are_torch:
121
+ v2 = torch.from_numpy(v2).to(input_device)
122
+
123
+ return v2
124
+
125
+
126
+ def slerp(
127
+ v0: Union[torch.Tensor, np.ndarray],
128
+ v1: Union[torch.Tensor, np.ndarray],
129
+ t: Union[float, torch.Tensor, np.ndarray],
130
+ DOT_THRESHOLD=0.9995,
131
+ ) -> Union[torch.Tensor, np.ndarray]:
132
+ """
133
+ Spherical linear interpolation between two vectors/tensors.
134
+
135
+ Args:
136
+ v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor.
137
+ v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor.
138
+ t: (`float`, `torch.Tensor`, or `np.ndarray`):
139
+ Interpolation factor. If float, must be between 0 and 1. If np.ndarray or
140
+ torch.Tensor, must be one dimensional with values between 0 and 1.
141
+ DOT_THRESHOLD (`float`, *optional*, default=0.9995):
142
+ Threshold for when to use linear interpolation instead of spherical interpolation.
143
+
144
+ Returns:
145
+ `torch.Tensor` or `np.ndarray`:
146
+ Interpolated vector/tensor between v0 and v1.
147
+ """
148
+ inputs_are_torch = False
149
+ t_is_float = False
150
+
151
+ if isinstance(v0, torch.Tensor):
152
+ inputs_are_torch = True
153
+ input_device = v0.device
154
+ v0 = v0.cpu().numpy()
155
+ v1 = v1.cpu().numpy()
156
+
157
+ if isinstance(t, torch.Tensor):
158
+ inputs_are_torch = True
159
+ input_device = t.device
160
+ t = t.cpu().numpy()
161
+ elif isinstance(t, float):
162
+ t_is_float = True
163
+ t = np.array([t], dtype=v0.dtype)
164
+
165
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
166
+ if np.abs(dot) > DOT_THRESHOLD:
167
+ # v1 and v2 are close to parallel
168
+ # Use linear interpolation instead
169
+ v2 = lerp(v0, v1, t)
170
+ else:
171
+ theta_0 = np.arccos(dot)
172
+ sin_theta_0 = np.sin(theta_0)
173
+ theta_t = theta_0 * t
174
+ sin_theta_t = np.sin(theta_t)
175
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
176
+ s1 = sin_theta_t / sin_theta_0
177
+ s0 = s0[..., None]
178
+ s1 = s1[..., None]
179
+ v0 = v0[None, ...]
180
+ v1 = v1[None, ...]
181
+ v2 = s0 * v0 + s1 * v1
182
+
183
+ if t_is_float and v0.ndim > 1:
184
+ assert v2.shape[0] == 1
185
+ v2 = np.squeeze(v2, axis=0)
186
+ if inputs_are_torch:
187
+ v2 = torch.from_numpy(v2).to(input_device)
188
+
189
+ return v2
190
+
191
+
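A small sketch contrasting the two interpolators above: at the midpoint, `lerp` pulls two orthogonal unit vectors toward the origin, while `slerp` keeps the result on the unit sphere, which is why `slerp` is the default for latents further below.

```py
import torch

a = torch.tensor([1.0, 0.0])
b = torch.tensor([0.0, 1.0])

print(lerp(a, b, 0.5))   # ~[0.50, 0.50], norm ~0.71 (shrinks toward the origin)
print(slerp(a, b, 0.5))  # ~[0.71, 0.71], norm ~1.00 (stays on the unit circle)
```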
192
+ class LatentConsistencyModelWalkPipeline(
193
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
194
+ ):
195
+ r"""
196
+ Pipeline for text-to-image generation using a latent consistency model.
197
+
198
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
199
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
200
+
201
+ The pipeline also inherits the following loading methods:
202
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
203
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
204
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
205
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
206
+
207
+ Args:
208
+ vae ([`AutoencoderKL`]):
209
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
210
+ text_encoder ([`~transformers.CLIPTextModel`]):
211
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
212
+ tokenizer ([`~transformers.CLIPTokenizer`]):
213
+ A `CLIPTokenizer` to tokenize text.
214
+ unet ([`UNet2DConditionModel`]):
215
+ A `UNet2DConditionModel` to denoise the encoded image latents.
216
+ scheduler ([`SchedulerMixin`]):
217
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only
218
+ supports [`LCMScheduler`].
219
+ safety_checker ([`StableDiffusionSafetyChecker`]):
220
+ Classification module that estimates whether generated images could be considered offensive or harmful.
221
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
222
+ about a model's potential harms.
223
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
224
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
225
+ requires_safety_checker (`bool`, *optional*, defaults to `True`):
226
+ Whether the pipeline requires a safety checker component.
227
+ """
228
+
229
+ model_cpu_offload_seq = "text_encoder->unet->vae"
230
+ _optional_components = ["safety_checker", "feature_extractor"]
231
+ _exclude_from_cpu_offload = ["safety_checker"]
232
+ _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"]
233
+
234
+ def __init__(
235
+ self,
236
+ vae: AutoencoderKL,
237
+ text_encoder: CLIPTextModel,
238
+ tokenizer: CLIPTokenizer,
239
+ unet: UNet2DConditionModel,
240
+ scheduler: LCMScheduler,
241
+ safety_checker: StableDiffusionSafetyChecker,
242
+ feature_extractor: CLIPImageProcessor,
243
+ requires_safety_checker: bool = True,
244
+ ):
245
+ super().__init__()
246
+
247
+ if safety_checker is None and requires_safety_checker:
248
+ logger.warning(
249
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
250
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
251
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
252
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
253
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
254
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
255
+ )
256
+
257
+ if safety_checker is not None and feature_extractor is None:
258
+ raise ValueError(
259
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
260
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
261
+ )
262
+
263
+ self.register_modules(
264
+ vae=vae,
265
+ text_encoder=text_encoder,
266
+ tokenizer=tokenizer,
267
+ unet=unet,
268
+ scheduler=scheduler,
269
+ safety_checker=safety_checker,
270
+ feature_extractor=feature_extractor,
271
+ )
272
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
273
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
274
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
275
+
276
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
277
+ def enable_vae_slicing(self):
278
+ r"""
279
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
280
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
281
+ """
282
+ self.vae.enable_slicing()
283
+
284
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
285
+ def disable_vae_slicing(self):
286
+ r"""
287
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
288
+ computing decoding in one step.
289
+ """
290
+ self.vae.disable_slicing()
291
+
292
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
293
+ def enable_vae_tiling(self):
294
+ r"""
295
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
296
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
297
+ processing larger images.
298
+ """
299
+ self.vae.enable_tiling()
300
+
301
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
302
+ def disable_vae_tiling(self):
303
+ r"""
304
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
305
+ computing decoding in one step.
306
+ """
307
+ self.vae.disable_tiling()
308
+
309
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
310
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
311
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
312
+
313
+ The suffixes after the scaling factors represent the stages where they are being applied.
314
+
315
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
316
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
317
+
318
+ Args:
319
+ s1 (`float`):
320
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
321
+ mitigate "oversmoothing effect" in the enhanced denoising process.
322
+ s2 (`float`):
323
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
324
+ mitigate "oversmoothing effect" in the enhanced denoising process.
325
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
326
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
327
+ """
328
+ if not hasattr(self, "unet"):
329
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
330
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
331
+
332
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
333
+ def disable_freeu(self):
334
+ """Disables the FreeU mechanism if enabled."""
335
+ self.unet.disable_freeu()
336
+
337
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
338
+ def encode_prompt(
339
+ self,
340
+ prompt,
341
+ device,
342
+ num_images_per_prompt,
343
+ do_classifier_free_guidance,
344
+ negative_prompt=None,
345
+ prompt_embeds: Optional[torch.FloatTensor] = None,
346
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
347
+ lora_scale: Optional[float] = None,
348
+ clip_skip: Optional[int] = None,
349
+ ):
350
+ r"""
351
+ Encodes the prompt into text encoder hidden states.
352
+
353
+ Args:
354
+ prompt (`str` or `List[str]`, *optional*):
355
+ prompt to be encoded
356
+ device: (`torch.device`):
357
+ torch device
358
+ num_images_per_prompt (`int`):
359
+ number of images that should be generated per prompt
360
+ do_classifier_free_guidance (`bool`):
361
+ whether to use classifier free guidance or not
362
+ negative_prompt (`str` or `List[str]`, *optional*):
363
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
364
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
365
+ less than `1`).
366
+ prompt_embeds (`torch.FloatTensor`, *optional*):
367
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
368
+ provided, text embeddings will be generated from `prompt` input argument.
369
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
370
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
371
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
372
+ argument.
373
+ lora_scale (`float`, *optional*):
374
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
375
+ clip_skip (`int`, *optional*):
376
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
377
+ the output of the pre-final layer will be used for computing the prompt embeddings.
378
+ """
379
+ # set lora scale so that monkey patched LoRA
380
+ # function of text encoder can correctly access it
381
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
382
+ self._lora_scale = lora_scale
383
+
384
+ # dynamically adjust the LoRA scale
385
+ if not USE_PEFT_BACKEND:
386
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
387
+ else:
388
+ scale_lora_layers(self.text_encoder, lora_scale)
389
+
390
+ if prompt is not None and isinstance(prompt, str):
391
+ batch_size = 1
392
+ elif prompt is not None and isinstance(prompt, list):
393
+ batch_size = len(prompt)
394
+ else:
395
+ batch_size = prompt_embeds.shape[0]
396
+
397
+ if prompt_embeds is None:
398
+ # textual inversion: process multi-vector tokens if necessary
399
+ if isinstance(self, TextualInversionLoaderMixin):
400
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
401
+
402
+ text_inputs = self.tokenizer(
403
+ prompt,
404
+ padding="max_length",
405
+ max_length=self.tokenizer.model_max_length,
406
+ truncation=True,
407
+ return_tensors="pt",
408
+ )
409
+ text_input_ids = text_inputs.input_ids
410
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
411
+
412
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
413
+ text_input_ids, untruncated_ids
414
+ ):
415
+ removed_text = self.tokenizer.batch_decode(
416
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
417
+ )
418
+ logger.warning(
419
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
420
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
421
+ )
422
+
423
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
424
+ attention_mask = text_inputs.attention_mask.to(device)
425
+ else:
426
+ attention_mask = None
427
+
428
+ if clip_skip is None:
429
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
430
+ prompt_embeds = prompt_embeds[0]
431
+ else:
432
+ prompt_embeds = self.text_encoder(
433
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
434
+ )
435
+ # Access the `hidden_states` first, that contains a tuple of
436
+ # all the hidden states from the encoder layers. Then index into
437
+ # the tuple to access the hidden states from the desired layer.
438
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
439
+ # We also need to apply the final LayerNorm here to not mess with the
440
+ # representations. The `last_hidden_states` that we typically use for
441
+ # obtaining the final prompt representations passes through the LayerNorm
442
+ # layer.
443
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
444
+
445
+ if self.text_encoder is not None:
446
+ prompt_embeds_dtype = self.text_encoder.dtype
447
+ elif self.unet is not None:
448
+ prompt_embeds_dtype = self.unet.dtype
449
+ else:
450
+ prompt_embeds_dtype = prompt_embeds.dtype
451
+
452
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
453
+
454
+ bs_embed, seq_len, _ = prompt_embeds.shape
455
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
456
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
457
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
458
+
459
+ # get unconditional embeddings for classifier free guidance
460
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
461
+ uncond_tokens: List[str]
462
+ if negative_prompt is None:
463
+ uncond_tokens = [""] * batch_size
464
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
465
+ raise TypeError(
466
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
467
+ f" {type(prompt)}."
468
+ )
469
+ elif isinstance(negative_prompt, str):
470
+ uncond_tokens = [negative_prompt]
471
+ elif batch_size != len(negative_prompt):
472
+ raise ValueError(
473
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
474
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
475
+ " the batch size of `prompt`."
476
+ )
477
+ else:
478
+ uncond_tokens = negative_prompt
479
+
480
+ # textual inversion: process multi-vector tokens if necessary
481
+ if isinstance(self, TextualInversionLoaderMixin):
482
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
483
+
484
+ max_length = prompt_embeds.shape[1]
485
+ uncond_input = self.tokenizer(
486
+ uncond_tokens,
487
+ padding="max_length",
488
+ max_length=max_length,
489
+ truncation=True,
490
+ return_tensors="pt",
491
+ )
492
+
493
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
494
+ attention_mask = uncond_input.attention_mask.to(device)
495
+ else:
496
+ attention_mask = None
497
+
498
+ negative_prompt_embeds = self.text_encoder(
499
+ uncond_input.input_ids.to(device),
500
+ attention_mask=attention_mask,
501
+ )
502
+ negative_prompt_embeds = negative_prompt_embeds[0]
503
+
504
+ if do_classifier_free_guidance:
505
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
506
+ seq_len = negative_prompt_embeds.shape[1]
507
+
508
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
509
+
510
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
511
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
512
+
513
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
514
+ # Retrieve the original scale by scaling back the LoRA layers
515
+ unscale_lora_layers(self.text_encoder, lora_scale)
516
+
517
+ return prompt_embeds, negative_prompt_embeds
518
+
519
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
520
+ def run_safety_checker(self, image, device, dtype):
521
+ if self.safety_checker is None:
522
+ has_nsfw_concept = None
523
+ else:
524
+ if torch.is_tensor(image):
525
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
526
+ else:
527
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
528
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
529
+ image, has_nsfw_concept = self.safety_checker(
530
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
531
+ )
532
+ return image, has_nsfw_concept
533
+
534
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
535
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
536
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
537
+ if isinstance(generator, list) and len(generator) != batch_size:
538
+ raise ValueError(
539
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
540
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
541
+ )
542
+
543
+ if latents is None:
544
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
545
+ else:
546
+ latents = latents.to(device)
547
+
548
+ # scale the initial noise by the standard deviation required by the scheduler
549
+ latents = latents * self.scheduler.init_noise_sigma
550
+ return latents
551
+
552
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
553
+ """
554
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
555
+
556
+ Args:
557
+ timesteps (`torch.Tensor`):
558
+ generate embedding vectors at these timesteps
559
+ embedding_dim (`int`, *optional*, defaults to 512):
560
+ dimension of the embeddings to generate
561
+ dtype:
562
+ data type of the generated embeddings
563
+
564
+ Returns:
565
+ `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
566
+ """
567
+ assert len(w.shape) == 1
568
+ w = w * 1000.0
569
+
570
+ half_dim = embedding_dim // 2
571
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
572
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
573
+ emb = w.to(dtype)[:, None] * emb[None, :]
574
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
575
+ if embedding_dim % 2 == 1: # zero pad
576
+ emb = torch.nn.functional.pad(emb, (0, 1))
577
+ assert emb.shape == (w.shape[0], embedding_dim)
578
+ return emb
579
+
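A shape sketch for the guidance embedding above (assuming `pipe` is an instance of this pipeline; the embedding dimension of 256 is purely illustrative and should match the UNet's `time_cond_proj_dim`): one row of sin/cos features per guidance weight in the batch.

```py
import torch

w = torch.full((2,), 8.0 - 1.0)  # guidance_scale - 1, one entry per latent in the batch
emb = pipe.get_guidance_scale_embedding(w, embedding_dim=256)
print(emb.shape)  # torch.Size([2, 256])
```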
580
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
581
+ def prepare_extra_step_kwargs(self, generator, eta):
582
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
583
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
584
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
585
+ # and should be between [0, 1]
586
+
587
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
588
+ extra_step_kwargs = {}
589
+ if accepts_eta:
590
+ extra_step_kwargs["eta"] = eta
591
+
592
+ # check if the scheduler accepts generator
593
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
594
+ if accepts_generator:
595
+ extra_step_kwargs["generator"] = generator
596
+ return extra_step_kwargs
597
+
598
+ # Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed
599
+ def check_inputs(
600
+ self,
601
+ prompt: Union[str, List[str]],
602
+ height: int,
603
+ width: int,
604
+ callback_steps: int,
605
+ prompt_embeds: Optional[torch.FloatTensor] = None,
606
+ callback_on_step_end_tensor_inputs=None,
607
+ ):
608
+ if height % 8 != 0 or width % 8 != 0:
609
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
610
+
611
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
612
+ raise ValueError(
613
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
614
+ f" {type(callback_steps)}."
615
+ )
616
+
617
+ if callback_on_step_end_tensor_inputs is not None and not all(
618
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
619
+ ):
620
+ raise ValueError(
621
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
622
+ )
623
+
624
+ if prompt is not None and prompt_embeds is not None:
625
+ raise ValueError(
626
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
627
+ " only forward one of the two."
628
+ )
629
+ elif prompt is None and prompt_embeds is None:
630
+ raise ValueError(
631
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
632
+ )
633
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
634
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
635
+
636
+ @torch.no_grad()
637
+ def interpolate_embedding(
638
+ self,
639
+ start_embedding: torch.FloatTensor,
640
+ end_embedding: torch.FloatTensor,
641
+ num_interpolation_steps: Union[int, List[int]],
642
+ interpolation_type: str,
643
+ ) -> torch.FloatTensor:
644
+ if interpolation_type == "lerp":
645
+ interpolation_fn = lerp
646
+ elif interpolation_type == "slerp":
647
+ interpolation_fn = slerp
648
+ else:
649
+ raise ValueError(
650
+ f"embedding_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}."
651
+ )
652
+
653
+ embedding = torch.cat([start_embedding, end_embedding])
654
+ steps = torch.linspace(0, 1, num_interpolation_steps, dtype=embedding.dtype).cpu().numpy()
655
+ steps = np.expand_dims(steps, axis=tuple(range(1, embedding.ndim)))
656
+ interpolations = []
657
+
658
+ # Interpolate between text embeddings
659
+ # TODO(aryan): Think of a better way of doing this
660
+ # See if it can be done parallelly instead
661
+ for i in range(embedding.shape[0] - 1):
662
+ interpolations.append(interpolation_fn(embedding[i], embedding[i + 1], steps).squeeze(dim=1))
663
+
664
+ interpolations = torch.cat(interpolations)
665
+ return interpolations
666
+
667
+ @torch.no_grad()
668
+ def interpolate_latent(
669
+ self,
670
+ start_latent: torch.FloatTensor,
671
+ end_latent: torch.FloatTensor,
672
+ num_interpolation_steps: Union[int, List[int]],
673
+ interpolation_type: str,
674
+ ) -> torch.FloatTensor:
675
+ if interpolation_type == "lerp":
676
+ interpolation_fn = lerp
677
+ elif interpolation_type == "slerp":
678
+ interpolation_fn = slerp
679
+
680
+ latent = torch.cat([start_latent, end_latent])
681
+ steps = torch.linspace(0, 1, num_interpolation_steps, dtype=latent.dtype).cpu().numpy()
682
+ steps = np.expand_dims(steps, axis=tuple(range(1, latent.ndim)))
683
+ interpolations = []
684
+
685
+ # Interpolate between latents
686
+ # TODO: Think of a better way of doing this
687
+ # See if it can be done parallelly instead
688
+ for i in range(latent.shape[0] - 1):
689
+ interpolations.append(interpolation_fn(latent[i], latent[i + 1], steps).squeeze(dim=1))
690
+
691
+ return torch.cat(interpolations)
692
+
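A shape sketch for the two interpolation helpers above (assuming `pipe` is an instance of this pipeline): each adjacent pair of prompts or latents expands into `num_interpolation_steps` frames, so a walk over `N` prompts produces `(N - 1) * num_interpolation_steps` images overall.

```py
import torch

start = torch.randn(1, 4, 64, 64)
end = torch.randn(1, 4, 64, 64)
frames = pipe.interpolate_latent(start, end, num_interpolation_steps=24, interpolation_type="slerp")
print(frames.shape)  # torch.Size([24, 4, 64, 64]) -> 24 frames for this pair of latents
```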
693
+ @property
694
+ def guidance_scale(self):
695
+ return self._guidance_scale
696
+
697
+ @property
698
+ def cross_attention_kwargs(self):
699
+ return self._cross_attention_kwargs
700
+
701
+ @property
702
+ def clip_skip(self):
703
+ return self._clip_skip
704
+
705
+ @property
706
+ def num_timesteps(self):
707
+ return self._num_timesteps
708
+
709
+ @torch.no_grad()
710
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
711
+ def __call__(
712
+ self,
713
+ prompt: Union[str, List[str]] = None,
714
+ height: Optional[int] = None,
715
+ width: Optional[int] = None,
716
+ num_inference_steps: int = 4,
717
+ num_interpolation_steps: int = 8,
718
+ original_inference_steps: int = None,
719
+ guidance_scale: float = 8.5,
720
+ num_images_per_prompt: Optional[int] = 1,
721
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
722
+ latents: Optional[torch.FloatTensor] = None,
723
+ prompt_embeds: Optional[torch.FloatTensor] = None,
724
+ output_type: Optional[str] = "pil",
725
+ return_dict: bool = True,
726
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
727
+ clip_skip: Optional[int] = None,
728
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
729
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
730
+ embedding_interpolation_type: str = "lerp",
731
+ latent_interpolation_type: str = "slerp",
732
+ process_batch_size: int = 4,
733
+ **kwargs,
734
+ ):
735
+ r"""
736
+ The call function to the pipeline for generation.
737
+
738
+ Args:
739
+ prompt (`str` or `List[str]`, *optional*):
740
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
741
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
742
+ The height in pixels of the generated image.
743
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
744
+ The width in pixels of the generated image.
745
+ num_inference_steps (`int`, *optional*, defaults to 4):
746
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
747
+ expense of slower inference.
748
+ original_inference_steps (`int`, *optional*):
749
+ The original number of inference steps used to generate a linearly-spaced timestep schedule, from which
750
+ we will draw `num_inference_steps` evenly spaced timesteps as our final timestep schedule,
751
+ following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the
752
+ scheduler's `original_inference_steps` attribute.
753
+ guidance_scale (`float`, *optional*, defaults to 8.5):
754
+ A higher guidance scale value encourages the model to generate images closely linked to the text
755
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
756
+ Note that the original latent consistency models paper uses a different CFG formulation where the
757
+ guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale >
758
+ 0`).
759
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
760
+ The number of images to generate per prompt.
761
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
762
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
763
+ generation deterministic.
764
+ latents (`torch.FloatTensor`, *optional*):
765
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
766
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
767
+ tensor is generated by sampling using the supplied random `generator`.
768
+ prompt_embeds (`torch.FloatTensor`, *optional*):
769
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
770
+ provided, text embeddings are generated from the `prompt` input argument.
771
+ output_type (`str`, *optional*, defaults to `"pil"`):
772
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
773
+ return_dict (`bool`, *optional*, defaults to `True`):
774
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
775
+ plain tuple.
776
+ cross_attention_kwargs (`dict`, *optional*):
777
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
778
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
779
+ clip_skip (`int`, *optional*):
780
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
781
+ the output of the pre-final layer will be used for computing the prompt embeddings.
782
+ callback_on_step_end (`Callable`, *optional*):
783
+ A function that is called at the end of each denoising step during inference. The function is called
784
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
785
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
786
+ `callback_on_step_end_tensor_inputs`.
787
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
788
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
789
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
790
+ `._callback_tensor_inputs` attribute of your pipeline class.
791
+ embedding_interpolation_type (`str`, *optional*, defaults to `"lerp"`):
792
+ The type of interpolation to use for interpolating between text embeddings. Choose between `"lerp"` and `"slerp"`.
793
+ latent_interpolation_type (`str`, *optional*, defaults to `"slerp"`):
794
+ The type of interpolation to use for interpolating between latents. Choose between `"lerp"` and `"slerp"`.
795
+ process_batch_size (`int`, *optional*, defaults to 4):
796
+ The batch size to use for processing the images. This is useful when generating a large number of images
797
+ and you want to avoid running out of memory.
798
+
799
+ Examples:
800
+
801
+ Returns:
802
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
803
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
804
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
805
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
806
+ "not-safe-for-work" (nsfw) content.
807
+ """
808
+
809
+ callback = kwargs.pop("callback", None)
810
+ callback_steps = kwargs.pop("callback_steps", None)
811
+
812
+ if callback is not None:
813
+ deprecate(
814
+ "callback",
815
+ "1.0.0",
816
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
817
+ )
818
+ if callback_steps is not None:
819
+ deprecate(
820
+ "callback_steps",
821
+ "1.0.0",
822
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`",
823
+ )
824
+
825
+ # 0. Default height and width to unet
826
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
827
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
828
+
829
+ # 1. Check inputs. Raise error if not correct
830
+ self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, callback_on_step_end_tensor_inputs)
831
+ self._guidance_scale = guidance_scale
832
+ self._clip_skip = clip_skip
833
+ self._cross_attention_kwargs = cross_attention_kwargs
834
+
835
+ # 2. Define call parameters
836
+ if prompt is not None and isinstance(prompt, str):
837
+ batch_size = 1
838
+ elif prompt is not None and isinstance(prompt, list):
839
+ batch_size = len(prompt)
840
+ else:
841
+ batch_size = prompt_embeds.shape[0]
842
+ if batch_size < 2:
843
+ raise ValueError(f"`prompt` must have length of at least 2 but found {batch_size}")
844
+ if num_images_per_prompt != 1:
845
+ raise ValueError("`num_images_per_prompt` must be `1` as no other value is supported yet")
846
+ if prompt_embeds is not None:
847
+ raise ValueError("`prompt_embeds` must be None since it is not supported yet")
848
+ if latents is not None:
849
+ raise ValueError("`latents` must be None since it is not supported yet")
850
+
851
+ device = self._execution_device
852
+ # do_classifier_free_guidance = guidance_scale > 1.0
853
+
854
+ lora_scale = (
855
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
856
+ )
857
+
858
+ self.scheduler.set_timesteps(num_inference_steps, device, original_inference_steps=original_inference_steps)
859
+ timesteps = self.scheduler.timesteps
860
+ num_channels_latents = self.unet.config.in_channels
861
+ # bs = batch_size * num_images_per_prompt
862
+
863
+ # 3. Encode initial input prompt
864
+ prompt_embeds_1, _ = self.encode_prompt(
865
+ prompt[:1],
866
+ device,
867
+ num_images_per_prompt=num_images_per_prompt,
868
+ do_classifier_free_guidance=False,
869
+ negative_prompt=None,
870
+ prompt_embeds=prompt_embeds,
871
+ negative_prompt_embeds=None,
872
+ lora_scale=lora_scale,
873
+ clip_skip=self.clip_skip,
874
+ )
875
+
876
+ # 4. Prepare initial latent variables
877
+ latents_1 = self.prepare_latents(
878
+ 1,
879
+ num_channels_latents,
880
+ height,
881
+ width,
882
+ prompt_embeds_1.dtype,
883
+ device,
884
+ generator,
885
+ latents,
886
+ )
887
+
888
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None)
889
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
890
+ self._num_timesteps = len(timesteps)
891
+ images = []
892
+
893
+ # 5. Iterate over prompts and perform latent walk. Note that we do this two prompts at a time
894
+ # otherwise the memory usage ends up being too high.
895
+ with self.progress_bar(total=batch_size - 1) as prompt_progress_bar:
896
+ for i in range(1, batch_size):
897
+ # 6. Encode current prompt
898
+ prompt_embeds_2, _ = self.encode_prompt(
899
+ prompt[i : i + 1],
900
+ device,
901
+ num_images_per_prompt=num_images_per_prompt,
902
+ do_classifier_free_guidance=False,
903
+ negative_prompt=None,
904
+ prompt_embeds=prompt_embeds,
905
+ negative_prompt_embeds=None,
906
+ lora_scale=lora_scale,
907
+ clip_skip=self.clip_skip,
908
+ )
909
+
910
+ # 7. Prepare current latent variables
911
+ latents_2 = self.prepare_latents(
912
+ 1,
913
+ num_channels_latents,
914
+ height,
915
+ width,
916
+ prompt_embeds_2.dtype,
917
+ device,
918
+ generator,
919
+ latents,
920
+ )
921
+
922
+ # 8. Interpolate between previous and current prompt embeddings and latents
923
+ inference_embeddings = self.interpolate_embedding(
924
+ start_embedding=prompt_embeds_1,
925
+ end_embedding=prompt_embeds_2,
926
+ num_interpolation_steps=num_interpolation_steps,
927
+ interpolation_type=embedding_interpolation_type,
928
+ )
929
+ inference_latents = self.interpolate_latent(
930
+ start_latent=latents_1,
931
+ end_latent=latents_2,
932
+ num_interpolation_steps=num_interpolation_steps,
933
+ interpolation_type=latent_interpolation_type,
934
+ )
935
+ next_prompt_embeds = inference_embeddings[-1:].detach().clone()
936
+ next_latents = inference_latents[-1:].detach().clone()
937
+ bs = num_interpolation_steps
938
+
939
+ # 9. Perform inference in batches. Note the use of `process_batch_size` to control the batch size
940
+ # of the inference. This is useful for reducing memory usage and can be configured based on the
941
+ # available GPU memory.
942
+ with self.progress_bar(
943
+ total=(bs + process_batch_size - 1) // process_batch_size
944
+ ) as batch_progress_bar:
945
+ for batch_index in range(0, bs, process_batch_size):
946
+ batch_inference_latents = inference_latents[batch_index : batch_index + process_batch_size]
947
+ batch_inference_embedddings = inference_embeddings[
948
+ batch_index : batch_index + process_batch_size
949
+ ]
950
+
951
+ self.scheduler.set_timesteps(
952
+ num_inference_steps, device, original_inference_steps=original_inference_steps
953
+ )
954
+ timesteps = self.scheduler.timesteps
955
+
956
+ current_bs = batch_inference_embedddings.shape[0]
957
+ w = torch.tensor(self.guidance_scale - 1).repeat(current_bs)
958
+ w_embedding = self.get_guidance_scale_embedding(
959
+ w, embedding_dim=self.unet.config.time_cond_proj_dim
960
+ ).to(device=device, dtype=latents_1.dtype)
961
+
962
+ # 10. Perform inference for current batch
963
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
964
+ for index, t in enumerate(timesteps):
965
+ batch_inference_latents = batch_inference_latents.to(batch_inference_embedddings.dtype)
966
+
967
+ # model prediction (v-prediction, eps, x)
968
+ model_pred = self.unet(
969
+ batch_inference_latents,
970
+ t,
971
+ timestep_cond=w_embedding,
972
+ encoder_hidden_states=batch_inference_embedddings,
973
+ cross_attention_kwargs=self.cross_attention_kwargs,
974
+ return_dict=False,
975
+ )[0]
976
+
977
+ # compute the previous noisy sample x_t -> x_t-1
978
+ batch_inference_latents, denoised = self.scheduler.step(
979
+ model_pred, t, batch_inference_latents, **extra_step_kwargs, return_dict=False
980
+ )
981
+ if callback_on_step_end is not None:
982
+ callback_kwargs = {}
983
+ for k in callback_on_step_end_tensor_inputs:
984
+ callback_kwargs[k] = locals()[k]
985
+ callback_outputs = callback_on_step_end(self, index, t, callback_kwargs)
986
+
987
+ batch_inference_latents = callback_outputs.pop("latents", batch_inference_latents)
988
+ batch_inference_embedddings = callback_outputs.pop(
989
+ "prompt_embeds", batch_inference_embedddings
990
+ )
991
+ w_embedding = callback_outputs.pop("w_embedding", w_embedding)
992
+ denoised = callback_outputs.pop("denoised", denoised)
993
+
994
+ # call the callback, if provided
995
+ if index == len(timesteps) - 1 or (
996
+ (index + 1) > num_warmup_steps and (index + 1) % self.scheduler.order == 0
997
+ ):
998
+ progress_bar.update()
999
+ if callback is not None and index % callback_steps == 0:
1000
+ step_idx = index // getattr(self.scheduler, "order", 1)
1001
+ callback(step_idx, t, batch_inference_latents)
1002
+
1003
+ denoised = denoised.to(batch_inference_embedddings.dtype)
1004
+
1005
+ # Note: This is not supported because you would get black images in your latent walk if
1006
+ # NSFW concept is detected
1007
+ # if not output_type == "latent":
1008
+ # image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
1009
+ # image, has_nsfw_concept = self.run_safety_checker(image, device, inference_embeddings.dtype)
1010
+ # else:
1011
+ # image = denoised
1012
+ # has_nsfw_concept = None
1013
+
1014
+ # if has_nsfw_concept is None:
1015
+ # do_denormalize = [True] * image.shape[0]
1016
+ # else:
1017
+ # do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1018
+
1019
+ image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
1020
+ do_denormalize = [True] * image.shape[0]
1021
+ has_nsfw_concept = None
1022
+
1023
+ image = self.image_processor.postprocess(
1024
+ image, output_type=output_type, do_denormalize=do_denormalize
1025
+ )
1026
+ images.append(image)
1027
+
1028
+ batch_progress_bar.update()
1029
+
1030
+ prompt_embeds_1 = next_prompt_embeds
1031
+ latents_1 = next_latents
1032
+
1033
+ prompt_progress_bar.update()
1034
+
1035
+ # 11. Determine what should be returned
1036
+ if output_type == "pil":
1037
+ images = [image for image_list in images for image in image_list]
1038
+ elif output_type == "np":
1039
+ images = np.concatenate(images)
1040
+ elif output_type == "pt":
1041
+ images = torch.cat(images)
1042
+ else:
1043
+ raise ValueError("`output_type` must be one of 'pil', 'np' or 'pt'.")
1044
+
1045
+ # Offload all models
1046
+ self.maybe_free_model_hooks()
1047
+
1048
+ if not return_dict:
1049
+ return (images, has_nsfw_concept)
1050
+
1051
+ return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
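A minimal usage sketch for the interpolation pipeline above, assuming it is loaded as a community pipeline; the checkpoint id, the `custom_pipeline` name, and the keyword values are illustrative assumptions, not a prescribed configuration.

import torch
from diffusers import DiffusionPipeline

# Sketch only: checkpoint id and custom_pipeline name are assumptions for illustration.
pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    custom_pipeline="latent_consistency_interpolate",
    torch_dtype=torch.float16,
).to("cuda")

# At least two prompts are required; the pipeline walks embeddings and latents between consecutive prompts.
images = pipe(
    prompt=["a photo of a cat", "a photo of a dog"],
    num_inference_steps=4,
    num_interpolation_steps=8,  # frames generated between each consecutive prompt pair
    process_batch_size=4,       # lower this to reduce peak GPU memory
).images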
v0.26.3/latent_consistency_txt2img.py ADDED
@@ -0,0 +1,728 @@
1
+ # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
+ # and https://github.com/hojonathanho/diffusion
17
+
18
+ import math
19
+ from dataclasses import dataclass
20
+ from typing import Any, Dict, List, Optional, Tuple, Union
21
+
22
+ import numpy as np
23
+ import torch
24
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
25
+
26
+ from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging
27
+ from diffusers.configuration_utils import register_to_config
28
+ from diffusers.image_processor import VaeImageProcessor
29
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
30
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
31
+ from diffusers.utils import BaseOutput
32
+
33
+
34
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
+
36
+
37
+ class LatentConsistencyModelPipeline(DiffusionPipeline):
38
+ _optional_components = ["scheduler"]
39
+
40
+ def __init__(
41
+ self,
42
+ vae: AutoencoderKL,
43
+ text_encoder: CLIPTextModel,
44
+ tokenizer: CLIPTokenizer,
45
+ unet: UNet2DConditionModel,
46
+ scheduler: "LCMScheduler",
47
+ safety_checker: StableDiffusionSafetyChecker,
48
+ feature_extractor: CLIPImageProcessor,
49
+ requires_safety_checker: bool = True,
50
+ ):
51
+ super().__init__()
52
+
53
+ scheduler = (
54
+ scheduler
55
+ if scheduler is not None
56
+ else LCMScheduler(
57
+ beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon"
58
+ )
59
+ )
60
+
61
+ self.register_modules(
62
+ vae=vae,
63
+ text_encoder=text_encoder,
64
+ tokenizer=tokenizer,
65
+ unet=unet,
66
+ scheduler=scheduler,
67
+ safety_checker=safety_checker,
68
+ feature_extractor=feature_extractor,
69
+ )
70
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
71
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
72
+
73
+ def _encode_prompt(
74
+ self,
75
+ prompt,
76
+ device,
77
+ num_images_per_prompt,
78
+ prompt_embeds: Optional[torch.FloatTensor] = None,
79
+ ):
80
+ r"""
81
+ Encodes the prompt into text encoder hidden states.
82
+ Args:
83
+ prompt (`str` or `List[str]`, *optional*):
84
+ prompt to be encoded
85
+ device: (`torch.device`):
86
+ torch device
87
+ num_images_per_prompt (`int`):
88
+ number of images that should be generated per prompt
89
+ prompt_embeds (`torch.FloatTensor`, *optional*):
90
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
91
+ provided, text embeddings will be generated from `prompt` input argument.
92
+ """
93
+
94
+ if prompt is not None and isinstance(prompt, str):
95
+ pass
96
+ elif prompt is not None and isinstance(prompt, list):
97
+ len(prompt)
98
+ else:
99
+ prompt_embeds.shape[0]
100
+
101
+ if prompt_embeds is None:
102
+ text_inputs = self.tokenizer(
103
+ prompt,
104
+ padding="max_length",
105
+ max_length=self.tokenizer.model_max_length,
106
+ truncation=True,
107
+ return_tensors="pt",
108
+ )
109
+ text_input_ids = text_inputs.input_ids
110
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
111
+
112
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
113
+ text_input_ids, untruncated_ids
114
+ ):
115
+ removed_text = self.tokenizer.batch_decode(
116
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
117
+ )
118
+ logger.warning(
119
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
120
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
121
+ )
122
+
123
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
124
+ attention_mask = text_inputs.attention_mask.to(device)
125
+ else:
126
+ attention_mask = None
127
+
128
+ prompt_embeds = self.text_encoder(
129
+ text_input_ids.to(device),
130
+ attention_mask=attention_mask,
131
+ )
132
+ prompt_embeds = prompt_embeds[0]
133
+
134
+ if self.text_encoder is not None:
135
+ prompt_embeds_dtype = self.text_encoder.dtype
136
+ elif self.unet is not None:
137
+ prompt_embeds_dtype = self.unet.dtype
138
+ else:
139
+ prompt_embeds_dtype = prompt_embeds.dtype
140
+
141
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
142
+
143
+ bs_embed, seq_len, _ = prompt_embeds.shape
144
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
145
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
146
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
147
+
148
+ # Don't need to get uncond prompt embedding because of LCM Guided Distillation
149
+ return prompt_embeds
150
+
151
+ def run_safety_checker(self, image, device, dtype):
152
+ if self.safety_checker is None:
153
+ has_nsfw_concept = None
154
+ else:
155
+ if torch.is_tensor(image):
156
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
157
+ else:
158
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
159
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
160
+ image, has_nsfw_concept = self.safety_checker(
161
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
162
+ )
163
+ return image, has_nsfw_concept
164
+
165
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):
166
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
167
+ if latents is None:
168
+ latents = torch.randn(shape, dtype=dtype).to(device)
169
+ else:
170
+ latents = latents.to(device)
171
+ # scale the initial noise by the standard deviation required by the scheduler
172
+ latents = latents * self.scheduler.init_noise_sigma
173
+ return latents
174
+
175
+ def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
176
+ """
177
+ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
178
+ Args:
179
+ timesteps: torch.Tensor: generate embedding vectors at these timesteps
180
+ embedding_dim: int: dimension of the embeddings to generate
181
+ dtype: data type of the generated embeddings
182
+ Returns:
183
+ embedding vectors with shape `(len(timesteps), embedding_dim)`
184
+ """
185
+ assert len(w.shape) == 1
186
+ w = w * 1000.0
187
+
188
+ half_dim = embedding_dim // 2
189
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
190
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
191
+ emb = w.to(dtype)[:, None] * emb[None, :]
192
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
193
+ if embedding_dim % 2 == 1: # zero pad
194
+ emb = torch.nn.functional.pad(emb, (0, 1))
195
+ assert emb.shape == (w.shape[0], embedding_dim)
196
+ return emb
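# Sketch: shape check for the sinusoidal guidance-scale embedding defined above.
# ``pipe`` is a hypothetical LatentConsistencyModelPipeline instance; only the shapes matter here.
w = torch.tensor(8.0).repeat(4)                        # guidance scale 8.0 for a batch of 4
w_embedding = pipe.get_w_embedding(w, embedding_dim=256)
assert w_embedding.shape == (4, 256)                   # sin/cos halves concatenated on the last dim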
197
+
198
+ @torch.no_grad()
199
+ def __call__(
200
+ self,
201
+ prompt: Union[str, List[str]] = None,
202
+ height: Optional[int] = 768,
203
+ width: Optional[int] = 768,
204
+ guidance_scale: float = 7.5,
205
+ num_images_per_prompt: Optional[int] = 1,
206
+ latents: Optional[torch.FloatTensor] = None,
207
+ num_inference_steps: int = 4,
208
+ lcm_origin_steps: int = 50,
209
+ prompt_embeds: Optional[torch.FloatTensor] = None,
210
+ output_type: Optional[str] = "pil",
211
+ return_dict: bool = True,
212
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
213
+ ):
214
+ # 0. Default height and width to unet
215
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
216
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
217
+
218
+ # 2. Define call parameters
219
+ if prompt is not None and isinstance(prompt, str):
220
+ batch_size = 1
221
+ elif prompt is not None and isinstance(prompt, list):
222
+ batch_size = len(prompt)
223
+ else:
224
+ batch_size = prompt_embeds.shape[0]
225
+
226
+ device = self._execution_device
227
+ # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)
228
+
229
+ # 3. Encode input prompt
230
+ prompt_embeds = self._encode_prompt(
231
+ prompt,
232
+ device,
233
+ num_images_per_prompt,
234
+ prompt_embeds=prompt_embeds,
235
+ )
236
+
237
+ # 4. Prepare timesteps
238
+ self.scheduler.set_timesteps(num_inference_steps, lcm_origin_steps)
239
+ timesteps = self.scheduler.timesteps
240
+
241
+ # 5. Prepare latent variable
242
+ num_channels_latents = self.unet.config.in_channels
243
+ latents = self.prepare_latents(
244
+ batch_size * num_images_per_prompt,
245
+ num_channels_latents,
246
+ height,
247
+ width,
248
+ prompt_embeds.dtype,
249
+ device,
250
+ latents,
251
+ )
252
+ bs = batch_size * num_images_per_prompt
253
+
254
+ # 6. Get Guidance Scale Embedding
255
+ w = torch.tensor(guidance_scale).repeat(bs)
256
+ w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)
257
+
258
+ # 7. LCM MultiStep Sampling Loop:
259
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
260
+ for i, t in enumerate(timesteps):
261
+ ts = torch.full((bs,), t, device=device, dtype=torch.long)
262
+ latents = latents.to(prompt_embeds.dtype)
263
+
264
+ # model prediction (v-prediction, eps, x)
265
+ model_pred = self.unet(
266
+ latents,
267
+ ts,
268
+ timestep_cond=w_embedding,
269
+ encoder_hidden_states=prompt_embeds,
270
+ cross_attention_kwargs=cross_attention_kwargs,
271
+ return_dict=False,
272
+ )[0]
273
+
274
+ # compute the previous noisy sample x_t -> x_t-1
275
+ latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)
276
+
277
+ # # call the callback, if provided
278
+ # if i == len(timesteps) - 1:
279
+ progress_bar.update()
280
+
281
+ denoised = denoised.to(prompt_embeds.dtype)
282
+ if not output_type == "latent":
283
+ image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]
284
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
285
+ else:
286
+ image = denoised
287
+ has_nsfw_concept = None
288
+
289
+ if has_nsfw_concept is None:
290
+ do_denormalize = [True] * image.shape[0]
291
+ else:
292
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
293
+
294
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
295
+
296
+ if not return_dict:
297
+ return (image, has_nsfw_concept)
298
+
299
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
300
+
301
+
302
+ @dataclass
303
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
304
+ class LCMSchedulerOutput(BaseOutput):
305
+ """
306
+ Output class for the scheduler's `step` function output.
307
+ Args:
308
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
309
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
310
+ denoising loop.
311
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
312
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
313
+ `pred_original_sample` can be used to preview progress or for guidance.
314
+ """
315
+
316
+ prev_sample: torch.FloatTensor
317
+ denoised: Optional[torch.FloatTensor] = None
318
+
319
+
320
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
321
+ def betas_for_alpha_bar(
322
+ num_diffusion_timesteps,
323
+ max_beta=0.999,
324
+ alpha_transform_type="cosine",
325
+ ):
326
+ """
327
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
328
+ (1-beta) over time from t = [0,1].
329
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
330
+ to that part of the diffusion process.
331
+ Args:
332
+ num_diffusion_timesteps (`int`): the number of betas to produce.
333
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
334
+ prevent singularities.
335
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
336
+ Choose from `cosine` or `exp`
337
+ Returns:
338
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
339
+ """
340
+ if alpha_transform_type == "cosine":
341
+
342
+ def alpha_bar_fn(t):
343
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
344
+
345
+ elif alpha_transform_type == "exp":
346
+
347
+ def alpha_bar_fn(t):
348
+ return math.exp(t * -12.0)
349
+
350
+ else:
351
+ raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
352
+
353
+ betas = []
354
+ for i in range(num_diffusion_timesteps):
355
+ t1 = i / num_diffusion_timesteps
356
+ t2 = (i + 1) / num_diffusion_timesteps
357
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
358
+ return torch.tensor(betas, dtype=torch.float32)
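# Sketch: the cosine ("squaredcos_cap_v2") schedule produced by the helper above.
betas = betas_for_alpha_bar(1000)                      # 1000 betas, each capped at max_beta=0.999
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
# alphas_cumprod decays smoothly from ~1 at t=0 toward 0 at t=999, which is the quantity
# the scheduler below uses to relate a noisy sample to its x_0 prediction.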
359
+
360
+
361
+ def rescale_zero_terminal_snr(betas):
362
+ """
363
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
364
+ Args:
365
+ betas (`torch.FloatTensor`):
366
+ the betas that the scheduler is being initialized with.
367
+ Returns:
368
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
369
+ """
370
+ # Convert betas to alphas_bar_sqrt
371
+ alphas = 1.0 - betas
372
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
373
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
374
+
375
+ # Store old values.
376
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
377
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
378
+
379
+ # Shift so the last timestep is zero.
380
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
381
+
382
+ # Scale so the first timestep is back to the old value.
383
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
384
+
385
+ # Convert alphas_bar_sqrt to betas
386
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
387
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
388
+ alphas = torch.cat([alphas_bar[0:1], alphas])
389
+ betas = 1 - alphas
390
+
391
+ return betas
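# Sketch: verifying the zero-terminal-SNR property enforced by the rescaling above.
betas = torch.linspace(0.0001, 0.02, 1000)             # a plain linear schedule as input
rescaled = rescale_zero_terminal_snr(betas)
alphas_bar = torch.cumprod(1.0 - rescaled, dim=0)
# alphas_bar[-1] is (numerically) zero: the final timestep carries pure noise, i.e. SNR(T) == 0.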
392
+
393
+
394
+ class LCMScheduler(SchedulerMixin, ConfigMixin):
395
+ """
396
+ `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
397
+ non-Markovian guidance.
398
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
399
+ methods the library implements for all schedulers such as loading and saving.
400
+ Args:
401
+ num_train_timesteps (`int`, defaults to 1000):
402
+ The number of diffusion steps to train the model.
403
+ beta_start (`float`, defaults to 0.0001):
404
+ The starting `beta` value of inference.
405
+ beta_end (`float`, defaults to 0.02):
406
+ The final `beta` value.
407
+ beta_schedule (`str`, defaults to `"linear"`):
408
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
409
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
410
+ trained_betas (`np.ndarray`, *optional*):
411
+ Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
412
+ clip_sample (`bool`, defaults to `True`):
413
+ Clip the predicted sample for numerical stability.
414
+ clip_sample_range (`float`, defaults to 1.0):
415
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
416
+ set_alpha_to_one (`bool`, defaults to `True`):
417
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
418
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
419
+ otherwise it uses the alpha value at step 0.
420
+ steps_offset (`int`, defaults to 0):
421
+ An offset added to the inference steps. You can use a combination of `offset=1` and
422
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
423
+ Diffusion.
424
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
425
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
426
+ `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
427
+ Video](https://imagen.research.google/video/paper.pdf) paper).
428
+ thresholding (`bool`, defaults to `False`):
429
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
430
+ as Stable Diffusion.
431
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
432
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
433
+ sample_max_value (`float`, defaults to 1.0):
434
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
435
+ timestep_spacing (`str`, defaults to `"leading"`):
436
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
437
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
438
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
439
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
440
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
441
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
442
+ """
443
+
444
+ # _compatibles = [e.name for e in KarrasDiffusionSchedulers]
445
+ order = 1
446
+
447
+ @register_to_config
448
+ def __init__(
449
+ self,
450
+ num_train_timesteps: int = 1000,
451
+ beta_start: float = 0.0001,
452
+ beta_end: float = 0.02,
453
+ beta_schedule: str = "linear",
454
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
455
+ clip_sample: bool = True,
456
+ set_alpha_to_one: bool = True,
457
+ steps_offset: int = 0,
458
+ prediction_type: str = "epsilon",
459
+ thresholding: bool = False,
460
+ dynamic_thresholding_ratio: float = 0.995,
461
+ clip_sample_range: float = 1.0,
462
+ sample_max_value: float = 1.0,
463
+ timestep_spacing: str = "leading",
464
+ rescale_betas_zero_snr: bool = False,
465
+ ):
466
+ if trained_betas is not None:
467
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
468
+ elif beta_schedule == "linear":
469
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
470
+ elif beta_schedule == "scaled_linear":
471
+ # this schedule is very specific to the latent diffusion model.
472
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
473
+ elif beta_schedule == "squaredcos_cap_v2":
474
+ # Glide cosine schedule
475
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
476
+ else:
477
+ raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
478
+
479
+ # Rescale for zero SNR
480
+ if rescale_betas_zero_snr:
481
+ self.betas = rescale_zero_terminal_snr(self.betas)
482
+
483
+ self.alphas = 1.0 - self.betas
484
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
485
+
486
+ # At every step in ddim, we are looking into the previous alphas_cumprod
487
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
488
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
489
+ # whether we use the final alpha of the "non-previous" one.
490
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
491
+
492
+ # standard deviation of the initial noise distribution
493
+ self.init_noise_sigma = 1.0
494
+
495
+ # setable values
496
+ self.num_inference_steps = None
497
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
498
+
499
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
500
+ """
501
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
502
+ current timestep.
503
+ Args:
504
+ sample (`torch.FloatTensor`):
505
+ The input sample.
506
+ timestep (`int`, *optional*):
507
+ The current timestep in the diffusion chain.
508
+ Returns:
509
+ `torch.FloatTensor`:
510
+ A scaled input sample.
511
+ """
512
+ return sample
513
+
514
+ def _get_variance(self, timestep, prev_timestep):
515
+ alpha_prod_t = self.alphas_cumprod[timestep]
516
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
517
+ beta_prod_t = 1 - alpha_prod_t
518
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
519
+
520
+ variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
521
+
522
+ return variance
523
+
524
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
525
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
526
+ """
527
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
528
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
529
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
530
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
531
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
532
+ https://arxiv.org/abs/2205.11487
533
+ """
534
+ dtype = sample.dtype
535
+ batch_size, channels, height, width = sample.shape
536
+
537
+ if dtype not in (torch.float32, torch.float64):
538
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
539
+
540
+ # Flatten sample for doing quantile calculation along each image
541
+ sample = sample.reshape(batch_size, channels * height * width)
542
+
543
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
544
+
545
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
546
+ s = torch.clamp(
547
+ s, min=1, max=self.config.sample_max_value
548
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
549
+
550
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
551
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
552
+
553
+ sample = sample.reshape(batch_size, channels, height, width)
554
+ sample = sample.to(dtype)
555
+
556
+ return sample
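# Sketch of the dynamic-thresholding behaviour above; ``scheduler`` is a hypothetical LCMScheduler
# built with the default dynamic_thresholding_ratio (0.995) and sample_max_value (1.0).
# With sample_max_value=1.0 the per-image percentile s is clamped to exactly 1, so the output
# always lands back in [-1, 1] regardless of how saturated the prediction was.
sample = 3.0 * torch.randn(2, 4, 64, 64)
thresholded = scheduler._threshold_sample(sample)
assert thresholded.abs().max() <= 1.0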
557
+
558
+ def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):
559
+ """
560
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
561
+ Args:
562
+ num_inference_steps (`int`):
563
+ The number of diffusion steps used when generating samples with a pre-trained model.
564
+ """
565
+
566
+ if num_inference_steps > self.config.num_train_timesteps:
567
+ raise ValueError(
568
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
569
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
570
+ f" maximal {self.config.num_train_timesteps} timesteps."
571
+ )
572
+
573
+ self.num_inference_steps = num_inference_steps
574
+
575
+ # LCM Timesteps Setting: # Linear Spacing
576
+ c = self.config.num_train_timesteps // lcm_origin_steps
577
+ lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule
578
+ skipping_step = len(lcm_origin_timesteps) // num_inference_steps
579
+ timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule
580
+
581
+ self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
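# Worked example of the schedule computed above, using the defaults from this file:
# num_train_timesteps=1000 and lcm_origin_steps=50 give c = 20, so the LCM training timesteps
# are [19, 39, ..., 999]. With num_inference_steps=4, skipping_step = 50 // 4 = 12 and the
# inference schedule becomes [999, 759, 519, 279], walked from the noisiest step downwards.
scheduler = LCMScheduler()
scheduler.set_timesteps(num_inference_steps=4, lcm_origin_steps=50)
# scheduler.timesteps -> tensor([999, 759, 519, 279])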
582
+
583
+ def get_scalings_for_boundary_condition_discrete(self, t):
584
+ self.sigma_data = 0.5 # Default: 0.5
585
+
586
+ # By dividing 0.1: This is almost a delta function at t=0.
587
+ c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
588
+ c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
589
+ return c_skip, c_out
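# Sketch: concrete values of the boundary-condition scalings above (sigma_data = 0.5,
# ``scheduler`` a hypothetical LCMScheduler instance).
c_skip, c_out = scheduler.get_scalings_for_boundary_condition_discrete(0)
# -> c_skip = 1.0, c_out = 0.0: at t = 0 the consistency boundary condition makes denoised == sample.
c_skip, c_out = scheduler.get_scalings_for_boundary_condition_discrete(999)
# -> c_skip ≈ 2.5e-9, c_out ≈ 1.0: at large t the output is essentially the model's x_0 prediction.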
590
+
591
+ def step(
592
+ self,
593
+ model_output: torch.FloatTensor,
594
+ timeindex: int,
595
+ timestep: int,
596
+ sample: torch.FloatTensor,
597
+ eta: float = 0.0,
598
+ use_clipped_model_output: bool = False,
599
+ generator=None,
600
+ variance_noise: Optional[torch.FloatTensor] = None,
601
+ return_dict: bool = True,
602
+ ) -> Union[LCMSchedulerOutput, Tuple]:
603
+ """
604
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
605
+ process from the learned model outputs (most often the predicted noise).
606
+ Args:
607
+ model_output (`torch.FloatTensor`):
608
+ The direct output from learned diffusion model.
609
+ timestep (`float`):
610
+ The current discrete timestep in the diffusion chain.
611
+ sample (`torch.FloatTensor`):
612
+ A current instance of a sample created by the diffusion process.
613
+ eta (`float`):
614
+ The weight of noise for added noise in diffusion step.
615
+ use_clipped_model_output (`bool`, defaults to `False`):
616
+ If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
617
+ because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
618
+ clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
619
+ `use_clipped_model_output` has no effect.
620
+ generator (`torch.Generator`, *optional*):
621
+ A random number generator.
622
+ variance_noise (`torch.FloatTensor`):
623
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
624
+ itself. Useful for methods such as [`CycleDiffusion`].
625
+ return_dict (`bool`, *optional*, defaults to `True`):
626
+ Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.
627
+ Returns:
628
+ [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:
629
+ If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
630
+ tuple is returned where the first element is the sample tensor.
631
+ """
632
+ if self.num_inference_steps is None:
633
+ raise ValueError(
634
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
635
+ )
636
+
637
+ # 1. get previous step value
638
+ prev_timeindex = timeindex + 1
639
+ if prev_timeindex < len(self.timesteps):
640
+ prev_timestep = self.timesteps[prev_timeindex]
641
+ else:
642
+ prev_timestep = timestep
643
+
644
+ # 2. compute alphas, betas
645
+ alpha_prod_t = self.alphas_cumprod[timestep]
646
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
647
+
648
+ beta_prod_t = 1 - alpha_prod_t
649
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
650
+
651
+ # 3. Get scalings for boundary conditions
652
+ c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)
653
+
654
+ # 4. Different Parameterization:
655
+ parameterization = self.config.prediction_type
656
+
657
+ if parameterization == "epsilon": # noise-prediction
658
+ pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
659
+
660
+ elif parameterization == "sample": # x-prediction
661
+ pred_x0 = model_output
662
+
663
+ elif parameterization == "v_prediction": # v-prediction
664
+ pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
665
+
666
+ # 4. Denoise model output using boundary conditions
667
+ denoised = c_out * pred_x0 + c_skip * sample
668
+
669
+ # 5. Sample z ~ N(0, I), For MultiStep Inference
670
+ # Noise is not used for one-step sampling.
671
+ if len(self.timesteps) > 1:
672
+ noise = torch.randn(model_output.shape).to(model_output.device)
673
+ prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
674
+ else:
675
+ prev_sample = denoised
676
+
677
+ if not return_dict:
678
+ return (prev_sample, denoised)
679
+
680
+ return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
681
+
682
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
683
+ def add_noise(
684
+ self,
685
+ original_samples: torch.FloatTensor,
686
+ noise: torch.FloatTensor,
687
+ timesteps: torch.IntTensor,
688
+ ) -> torch.FloatTensor:
689
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
690
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
691
+ timesteps = timesteps.to(original_samples.device)
692
+
693
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
694
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
695
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
696
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
697
+
698
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
699
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
700
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
701
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
702
+
703
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
704
+ return noisy_samples
705
+
706
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
707
+ def get_velocity(
708
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
709
+ ) -> torch.FloatTensor:
710
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
711
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
712
+ timesteps = timesteps.to(sample.device)
713
+
714
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
715
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
716
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
717
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
718
+
719
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
720
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
721
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
722
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
723
+
724
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
725
+ return velocity
726
+
727
+ def __len__(self):
728
+ return self.config.num_train_timesteps
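A minimal usage sketch for the text-to-image LCM pipeline defined in this file; the checkpoint id is an illustrative assumption and the `custom_pipeline` name follows this file's name.

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",             # assumed LCM checkpoint for illustration
    custom_pipeline="latent_consistency_txt2img",
    torch_dtype=torch.float32,
)
pipe.to("cuda")

# LCM needs only a handful of steps; guidance is distilled into the model, so no negative prompt is used.
images = pipe(
    prompt="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
    num_inference_steps=4,
    guidance_scale=8.0,
    lcm_origin_steps=50,
).images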
v0.26.3/llm_grounded_diffusion.py ADDED
@@ -0,0 +1,1613 @@
1
+ # Copyright 2023 Long Lian, the GLIGEN Authors, and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # This is a single file implementation of LMD+. See README.md for examples.
16
+
17
+ import ast
18
+ import gc
19
+ import inspect
20
+ import math
21
+ import warnings
22
+ from collections.abc import Iterable
23
+ from typing import Any, Callable, Dict, List, Optional, Union
24
+
25
+ import torch
26
+ import torch.nn.functional as F
27
+ from packaging import version
28
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
29
+
30
+ from diffusers.configuration_utils import FrozenDict
31
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
32
+ from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
33
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
34
+ from diffusers.models.attention import Attention, GatedSelfAttentionDense
35
+ from diffusers.models.attention_processor import AttnProcessor2_0
36
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
37
+ from diffusers.pipelines import DiffusionPipeline
38
+ from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
39
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
40
+ from diffusers.schedulers import KarrasDiffusionSchedulers
41
+ from diffusers.utils import (
42
+ USE_PEFT_BACKEND,
43
+ deprecate,
44
+ logging,
45
+ replace_example_docstring,
46
+ scale_lora_layers,
47
+ unscale_lora_layers,
48
+ )
49
+ from diffusers.utils.torch_utils import randn_tensor
50
+
51
+
52
+ EXAMPLE_DOC_STRING = """
53
+ Examples:
54
+ ```py
55
+ >>> import torch
56
+ >>> from diffusers import DiffusionPipeline
57
+
58
+ >>> pipe = DiffusionPipeline.from_pretrained(
59
+ ... "longlian/lmd_plus",
60
+ ... custom_pipeline="llm_grounded_diffusion",
61
+ ... custom_revision="main",
62
+ ... variant="fp16", torch_dtype=torch.float16
63
+ ... )
64
+ >>> pipe.enable_model_cpu_offload()
65
+
66
+ >>> # Generate an image described by the prompt and
67
+ >>> # insert objects described by text at the region defined by bounding boxes
68
+ >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
69
+ >>> boxes = [[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]]
70
+ >>> phrases = ["a waterfall", "a modern high speed train"]
71
+
72
+ >>> images = pipe(
73
+ ... prompt=prompt,
74
+ ... phrases=phrases,
75
+ ... boxes=boxes,
76
+ ... gligen_scheduled_sampling_beta=0.4,
77
+ ... output_type="pil",
78
+ ... num_inference_steps=50,
79
+ ... lmd_guidance_kwargs={}
80
+ ... ).images
81
+
82
+ >>> images[0].save("./lmd_plus_generation.jpg")
83
+
84
+ >>> # Generate directly from a text prompt and an LLM response
85
+ >>> prompt = "a waterfall and a modern high speed train in a beautiful forest with fall foliage"
86
+ >>> phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(\"""
87
+ [('a waterfall', [71, 105, 148, 258]), ('a modern high speed train', [255, 223, 181, 149])]
88
+ Background prompt: A beautiful forest with fall foliage
89
+ Negative prompt:
90
+ \""")
91
+
92
+ >>> images = pipe(
93
+ ... prompt=prompt,
94
+ ... negative_prompt=neg_prompt,
95
+ ... phrases=phrases,
96
+ ... boxes=boxes,
97
+ ... gligen_scheduled_sampling_beta=0.4,
98
+ ... output_type="pil",
99
+ ... num_inference_steps=50,
100
+ ... lmd_guidance_kwargs={}
101
+ ... ).images
102
+
103
+ >>> images[0].save("./lmd_plus_generation.jpg")
104
+
105
+ images[0]
106
+
107
+ ```
108
+ """
109
+
110
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
111
+
112
+ # All keys in Stable Diffusion models: [('down', 0, 0, 0), ('down', 0, 1, 0), ('down', 1, 0, 0), ('down', 1, 1, 0), ('down', 2, 0, 0), ('down', 2, 1, 0), ('mid', 0, 0, 0), ('up', 1, 0, 0), ('up', 1, 1, 0), ('up', 1, 2, 0), ('up', 2, 0, 0), ('up', 2, 1, 0), ('up', 2, 2, 0), ('up', 3, 0, 0), ('up', 3, 1, 0), ('up', 3, 2, 0)]
113
+ # Note that the first up block is `UpBlock2D` rather than `CrossAttnUpBlock2D` and does not have attention. The last index is always 0 in our case since we have one `BasicTransformerBlock` in each `Transformer2DModel`.
114
+ DEFAULT_GUIDANCE_ATTN_KEYS = [
115
+ ("mid", 0, 0, 0),
116
+ ("up", 1, 0, 0),
117
+ ("up", 1, 1, 0),
118
+ ("up", 1, 2, 0),
119
+ ]
120
+
121
+
122
+ def convert_attn_keys(key):
123
+ """Convert the attention key from tuple format to the torch state format"""
124
+
125
+ if key[0] == "mid":
126
+ assert key[1] == 0, f"mid block only has one block but the index is {key[1]}"
127
+ return f"{key[0]}_block.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
128
+
129
+ return f"{key[0]}_blocks.{key[1]}.attentions.{key[2]}.transformer_blocks.{key[3]}.attn2.processor"
130
+
131
+
132
+ DEFAULT_GUIDANCE_ATTN_KEYS = [convert_attn_keys(key) for key in DEFAULT_GUIDANCE_ATTN_KEYS]
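# Sketch: what `convert_attn_keys` produces for the tuple keys above. The resulting strings match
# the key format of the UNet's `attn_processors` dict, which is how the hooked processors are addressed.
assert convert_attn_keys(("mid", 0, 0, 0)) == "mid_block.attentions.0.transformer_blocks.0.attn2.processor"
assert convert_attn_keys(("up", 1, 2, 0)) == "up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor"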
133
+
134
+
135
+ def scale_proportion(obj_box, H, W):
136
+ # Separately rounding box_w and box_h to allow shift invariant box sizes. Otherwise box sizes may change when both coordinates being rounded end with ".5".
137
+ x_min, y_min = round(obj_box[0] * W), round(obj_box[1] * H)
138
+ box_w, box_h = round((obj_box[2] - obj_box[0]) * W), round((obj_box[3] - obj_box[1]) * H)
139
+ x_max, y_max = x_min + box_w, y_min + box_h
140
+
141
+ x_min, y_min = max(x_min, 0), max(y_min, 0)
142
+ x_max, y_max = min(x_max, W), min(y_max, H)
143
+
144
+ return x_min, y_min, x_max, y_max
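# Sketch: the first normalized box from the docstring example above, mapped onto a 512x512 grid.
# Width and height are rounded separately (as noted above), so a pure shift of the box cannot
# change its pixel size.
assert scale_proportion([0.1387, 0.2051, 0.4277, 0.7090], H=512, W=512) == (71, 105, 219, 363)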
145
+
146
+
147
+ # Adapted from the parent class `AttnProcessor2_0`
148
+ class AttnProcessorWithHook(AttnProcessor2_0):
149
+ def __init__(
150
+ self,
151
+ attn_processor_key,
152
+ hidden_size,
153
+ cross_attention_dim,
154
+ hook=None,
155
+ fast_attn=True,
156
+ enabled=True,
157
+ ):
158
+ super().__init__()
159
+ self.attn_processor_key = attn_processor_key
160
+ self.hidden_size = hidden_size
161
+ self.cross_attention_dim = cross_attention_dim
162
+ self.hook = hook
163
+ self.fast_attn = fast_attn
164
+ self.enabled = enabled
165
+
166
+ def __call__(
167
+ self,
168
+ attn: Attention,
169
+ hidden_states,
170
+ encoder_hidden_states=None,
171
+ attention_mask=None,
172
+ temb=None,
173
+ scale: float = 1.0,
174
+ ):
175
+ residual = hidden_states
176
+
177
+ if attn.spatial_norm is not None:
178
+ hidden_states = attn.spatial_norm(hidden_states, temb)
179
+
180
+ input_ndim = hidden_states.ndim
181
+
182
+ if input_ndim == 4:
183
+ batch_size, channel, height, width = hidden_states.shape
184
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
185
+
186
+ batch_size, sequence_length, _ = (
187
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
188
+ )
189
+
190
+ if attention_mask is not None:
191
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
192
+
193
+ if attn.group_norm is not None:
194
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
195
+
196
+ args = () if USE_PEFT_BACKEND else (scale,)
197
+ query = attn.to_q(hidden_states, *args)
198
+
199
+ if encoder_hidden_states is None:
200
+ encoder_hidden_states = hidden_states
201
+ elif attn.norm_cross:
202
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
203
+
204
+ key = attn.to_k(encoder_hidden_states, *args)
205
+ value = attn.to_v(encoder_hidden_states, *args)
206
+
207
+ inner_dim = key.shape[-1]
208
+ head_dim = inner_dim // attn.heads
209
+
210
+ if (self.hook is not None and self.enabled) or not self.fast_attn:
211
+ query_batch_dim = attn.head_to_batch_dim(query)
212
+ key_batch_dim = attn.head_to_batch_dim(key)
213
+ value_batch_dim = attn.head_to_batch_dim(value)
214
+ attention_probs = attn.get_attention_scores(query_batch_dim, key_batch_dim, attention_mask)
215
+
216
+ if self.hook is not None and self.enabled:
217
+ # Call the hook with query, key, value, and attention maps
218
+ self.hook(
219
+ self.attn_processor_key,
220
+ query_batch_dim,
221
+ key_batch_dim,
222
+ value_batch_dim,
223
+ attention_probs,
224
+ )
225
+
226
+ if self.fast_attn:
227
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
228
+
229
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
230
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
231
+
232
+ if attention_mask is not None:
233
+ # scaled_dot_product_attention expects attention_mask shape to be
234
+ # (batch, heads, source_length, target_length)
235
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
236
+
237
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
238
+ # TODO: add support for attn.scale when we move to Torch 2.1
239
+ hidden_states = F.scaled_dot_product_attention(
240
+ query,
241
+ key,
242
+ value,
243
+ attn_mask=attention_mask,
244
+ dropout_p=0.0,
245
+ is_causal=False,
246
+ )
247
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
248
+ hidden_states = hidden_states.to(query.dtype)
249
+ else:
250
+ hidden_states = torch.bmm(attention_probs, value)
251
+ hidden_states = attn.batch_to_head_dim(hidden_states)
252
+
253
+ # linear proj
254
+ hidden_states = attn.to_out[0](hidden_states, *args)
255
+ # dropout
256
+ hidden_states = attn.to_out[1](hidden_states)
257
+
258
+ if input_ndim == 4:
259
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
260
+
261
+ if attn.residual_connection:
262
+ hidden_states = hidden_states + residual
263
+
264
+ hidden_states = hidden_states / attn.rescale_output_factor
265
+
266
+ return hidden_states
267
+
268
+
269
+ class LLMGroundedDiffusionPipeline(
270
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
271
+ ):
272
+ r"""
273
+ Pipeline for layout-grounded text-to-image generation using LLM-grounded Diffusion (LMD+): https://arxiv.org/pdf/2305.13655.pdf.
274
+
275
+ This model inherits from [`StableDiffusionPipeline`] and aims at implementing the pipeline with minimal modifications. Check the superclass documentation for the generic methods
276
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
277
+
278
+ This is a simplified implementation that does not perform latent or attention transfer from single object generation to overall generation. The final image is generated directly with attention and adapters control.
279
+
280
+ Args:
281
+ vae ([`AutoencoderKL`]):
282
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
283
+ text_encoder ([`~transformers.CLIPTextModel`]):
284
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
285
+ tokenizer ([`~transformers.CLIPTokenizer`]):
286
+ A `CLIPTokenizer` to tokenize text.
287
+ unet ([`UNet2DConditionModel`]):
288
+ A `UNet2DConditionModel` to denoise the encoded image latents.
289
+ scheduler ([`SchedulerMixin`]):
290
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
291
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
292
+ safety_checker ([`StableDiffusionSafetyChecker`]):
293
+ Classification module that estimates whether generated images could be considered offensive or harmful.
294
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
295
+ about a model's potential harms.
296
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
297
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
298
+ requires_safety_checker (bool):
299
+ Whether a safety checker is needed for this pipeline.
300
+ """
301
+
302
+ model_cpu_offload_seq = "text_encoder->unet->vae"
303
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
304
+ _exclude_from_cpu_offload = ["safety_checker"]
305
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
306
+
307
+ objects_text = "Objects: "
308
+ bg_prompt_text = "Background prompt: "
309
+ bg_prompt_text_no_trailing_space = bg_prompt_text.rstrip()
310
+ neg_prompt_text = "Negative prompt: "
311
+ neg_prompt_text_no_trailing_space = neg_prompt_text.rstrip()
312
+
313
+ def __init__(
314
+ self,
315
+ vae: AutoencoderKL,
316
+ text_encoder: CLIPTextModel,
317
+ tokenizer: CLIPTokenizer,
318
+ unet: UNet2DConditionModel,
319
+ scheduler: KarrasDiffusionSchedulers,
320
+ safety_checker: StableDiffusionSafetyChecker,
321
+ feature_extractor: CLIPImageProcessor,
322
+ image_encoder: CLIPVisionModelWithProjection = None,
323
+ requires_safety_checker: bool = True,
324
+ ):
325
+ # This is copied from StableDiffusionPipeline, with hook initializations for LMD+.
326
+ super().__init__()
327
+
328
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
329
+ deprecation_message = (
330
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
331
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
332
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
333
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
334
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
335
+ " file"
336
+ )
337
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
338
+ new_config = dict(scheduler.config)
339
+ new_config["steps_offset"] = 1
340
+ scheduler._internal_dict = FrozenDict(new_config)
341
+
342
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
343
+ deprecation_message = (
344
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
345
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
346
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
347
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
348
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
349
+ )
350
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
351
+ new_config = dict(scheduler.config)
352
+ new_config["clip_sample"] = False
353
+ scheduler._internal_dict = FrozenDict(new_config)
354
+
355
+ if safety_checker is None and requires_safety_checker:
356
+ logger.warning(
357
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
358
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
359
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
360
+ " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
361
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
362
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
363
+ )
364
+
365
+ if safety_checker is not None and feature_extractor is None:
366
+ raise ValueError(
367
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
368
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
369
+ )
370
+
371
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
372
+ version.parse(unet.config._diffusers_version).base_version
373
+ ) < version.parse("0.9.0.dev0")
374
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
375
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
376
+ deprecation_message = (
377
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
378
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
379
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
380
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
381
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
382
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
383
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
384
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
385
+ " the `unet/config.json` file"
386
+ )
387
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
388
+ new_config = dict(unet.config)
389
+ new_config["sample_size"] = 64
390
+ unet._internal_dict = FrozenDict(new_config)
391
+
392
+ self.register_modules(
393
+ vae=vae,
394
+ text_encoder=text_encoder,
395
+ tokenizer=tokenizer,
396
+ unet=unet,
397
+ scheduler=scheduler,
398
+ safety_checker=safety_checker,
399
+ feature_extractor=feature_extractor,
400
+ image_encoder=image_encoder,
401
+ )
402
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
403
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
404
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
405
+
406
+ # Initialize the attention hooks for LLM-grounded Diffusion
407
+ self.register_attn_hooks(unet)
408
+ self._saved_attn = None
409
+
410
+ def attn_hook(self, name, query, key, value, attention_probs):
411
+ if name in DEFAULT_GUIDANCE_ATTN_KEYS:
412
+ self._saved_attn[name] = attention_probs
413
+
414
+ @classmethod
415
+ def convert_box(cls, box, height, width):
416
+ # box: x, y, w, h (in 512 format) -> x_min, y_min, x_max, y_max
417
+ x_min, y_min = box[0] / width, box[1] / height
418
+ w_box, h_box = box[2] / width, box[3] / height
419
+
420
+ x_max, y_max = x_min + w_box, y_min + h_box
421
+
422
+ return x_min, y_min, x_max, y_max
423
+
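+ # Example (illustrative, values are assumptions): on a 512x512 canvas,
+ # convert_box([100, 50, 200, 100], height=512, width=512) gives approximately
+ # (0.195, 0.098, 0.586, 0.293), i.e. (x_min, y_min, x_max, y_max) in [0, 1] coordinates.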
424
+ @classmethod
425
+ def _parse_response_with_negative(cls, text):
426
+ if not text:
427
+ raise ValueError("LLM response is empty")
428
+
429
+ if cls.objects_text in text:
430
+ text = text.split(cls.objects_text)[1]
431
+
432
+ text_split = text.split(cls.bg_prompt_text_no_trailing_space)
433
+ if len(text_split) == 2:
434
+ gen_boxes, text_rem = text_split
435
+ else:
436
+ raise ValueError(f"LLM response is incomplete: {text}")
437
+
438
+ text_split = text_rem.split(cls.neg_prompt_text_no_trailing_space)
439
+
440
+ if len(text_split) == 2:
441
+ bg_prompt, neg_prompt = text_split
442
+ else:
443
+ raise ValueError(f"LLM response is incomplete: {text}")
444
+
445
+ try:
446
+ gen_boxes = ast.literal_eval(gen_boxes)
447
+ except SyntaxError as e:
448
+ # Sometimes the response is in plain text
449
+ if "No objects" in gen_boxes or gen_boxes.strip() == "":
450
+ gen_boxes = []
451
+ else:
452
+ raise e
453
+ bg_prompt = bg_prompt.strip()
454
+ neg_prompt = neg_prompt.strip()
455
+
456
+ # LLM may return "None" to mean no negative prompt provided.
457
+ if neg_prompt == "None":
458
+ neg_prompt = ""
459
+
460
+ return gen_boxes, bg_prompt, neg_prompt
461
+
462
+ @classmethod
463
+ def parse_llm_response(cls, response, canvas_height=512, canvas_width=512):
464
+ # Infer from spec
465
+ gen_boxes, bg_prompt, neg_prompt = cls._parse_response_with_negative(text=response)
466
+
467
+ gen_boxes = sorted(gen_boxes, key=lambda gen_box: gen_box[0])
468
+
469
+ phrases = [name for name, _ in gen_boxes]
470
+ boxes = [cls.convert_box(box, height=canvas_height, width=canvas_width) for _, box in gen_boxes]
471
+
472
+ return phrases, boxes, bg_prompt, neg_prompt
473
+
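+ # Illustrative sketch (the response text and the `pipe` instance are assumptions; only the
+ # "Objects:" / "Background prompt:" / "Negative prompt:" markers are required by the parser):
+ #   response = (
+ #       "Objects: [('a cat', [51, 82, 179, 168]), ('a dog', [288, 154, 184, 172])]\n"
+ #       "Background prompt: A realistic photo of a grassy field\n"
+ #       "Negative prompt: None"
+ #   )
+ #   phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(response)
+ #   # phrases == ['a cat', 'a dog']; boxes holds normalized (x_min, y_min, x_max, y_max) tuples;
+ #   # neg_prompt == "" because the literal "None" means no negative prompt was provided.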
474
+ def check_inputs(
475
+ self,
476
+ prompt,
477
+ height,
478
+ width,
479
+ callback_steps,
480
+ phrases,
481
+ boxes,
482
+ negative_prompt=None,
483
+ prompt_embeds=None,
484
+ negative_prompt_embeds=None,
485
+ phrase_indices=None,
486
+ ):
487
+ if height % 8 != 0 or width % 8 != 0:
488
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
489
+
490
+ if (callback_steps is None) or (
491
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
492
+ ):
493
+ raise ValueError(
494
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
495
+ f" {type(callback_steps)}."
496
+ )
497
+
498
+ if prompt is not None and prompt_embeds is not None:
499
+ raise ValueError(
500
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
501
+ " only forward one of the two."
502
+ )
503
+ elif prompt is None and prompt_embeds is None:
504
+ raise ValueError(
505
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
506
+ )
507
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
508
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
509
+ elif prompt is None and phrase_indices is None:
510
+ raise ValueError("If the prompt is None, the phrase_indices cannot be None")
511
+
512
+ if negative_prompt is not None and negative_prompt_embeds is not None:
513
+ raise ValueError(
514
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
515
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
516
+ )
517
+
518
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
519
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
520
+ raise ValueError(
521
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
522
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
523
+ f" {negative_prompt_embeds.shape}."
524
+ )
525
+
526
+ if len(phrases) != len(boxes):
527
+ raise ValueError(
528
+ "length of `phrases` and `boxes` has to be the same, but"
529
+ f" got: `phrases` {len(phrases)} != `boxes` {len(boxes)}"
530
+ )
531
+
532
+ def register_attn_hooks(self, unet):
533
+ """Register hooks on the cross-attention processors to obtain the attention maps for guidance."""
534
+
535
+ attn_procs = {}
536
+
537
+ for name in unet.attn_processors.keys():
538
+ # Only obtain the queries and keys from cross-attention
539
+ if name.endswith("attn1.processor") or name.endswith("fuser.attn.processor"):
540
+ # Keep the same attn_processors for self-attention (no hooks for self-attention)
541
+ attn_procs[name] = unet.attn_processors[name]
542
+ continue
543
+
544
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
545
+
546
+ if name.startswith("mid_block"):
547
+ hidden_size = unet.config.block_out_channels[-1]
548
+ elif name.startswith("up_blocks"):
549
+ block_id = int(name[len("up_blocks.")])
550
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
551
+ elif name.startswith("down_blocks"):
552
+ block_id = int(name[len("down_blocks.")])
553
+ hidden_size = unet.config.block_out_channels[block_id]
554
+
555
+ attn_procs[name] = AttnProcessorWithHook(
556
+ attn_processor_key=name,
557
+ hidden_size=hidden_size,
558
+ cross_attention_dim=cross_attention_dim,
559
+ hook=self.attn_hook,
560
+ fast_attn=True,
561
+ # Not enabled by default
562
+ enabled=False,
563
+ )
564
+
565
+ unet.set_attn_processor(attn_procs)
566
+
567
+ def enable_fuser(self, enabled=True):
568
+ for module in self.unet.modules():
569
+ if isinstance(module, GatedSelfAttentionDense):
570
+ module.enabled = enabled
571
+
572
+ def enable_attn_hook(self, enabled=True):
573
+ for module in self.unet.attn_processors.values():
574
+ if isinstance(module, AttnProcessorWithHook):
575
+ module.enabled = enabled
576
+
577
+ def get_token_map(self, prompt, padding="do_not_pad", verbose=False):
578
+ """Return the prompt as a list of token strings, so that a list index maps to a token position."""
579
+ fg_prompt_tokens = self.tokenizer([prompt], padding=padding, max_length=77, return_tensors="np")
580
+ input_ids = fg_prompt_tokens["input_ids"][0]
581
+
582
+ token_map = []
583
+ for ind, item in enumerate(input_ids.tolist()):
584
+ token = self.tokenizer._convert_id_to_token(item)
585
+
586
+ if verbose:
587
+ logger.info(f"{ind}, {token} ({item})")
588
+
589
+ token_map.append(token)
590
+
591
+ return token_map
592
+
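+ # Example (illustrative): with the CLIP tokenizer, get_token_map("a cat") returns roughly
+ # ['<|startoftext|>', 'a</w>', 'cat</w>', '<|endoftext|>'], so a list index equals a token position.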
593
+ def get_phrase_indices(
594
+ self,
595
+ prompt,
596
+ phrases,
597
+ token_map=None,
598
+ add_suffix_if_not_found=False,
599
+ verbose=False,
600
+ ):
601
+ for obj in phrases:
602
+ # Suffix the prompt with object name for attention guidance if object is not in the prompt, using "|" to separate the prompt and the suffix
603
+ if obj not in prompt:
604
+ prompt += "| " + obj
605
+
606
+ if token_map is None:
607
+ # We allow using a pre-computed token map.
608
+ token_map = self.get_token_map(prompt=prompt, padding="do_not_pad", verbose=verbose)
609
+ token_map_str = " ".join(token_map)
610
+
611
+ phrase_indices = []
612
+
613
+ for obj in phrases:
614
+ phrase_token_map = self.get_token_map(prompt=obj, padding="do_not_pad", verbose=verbose)
615
+ # Remove <bos> and <eos> in substr
616
+ phrase_token_map = phrase_token_map[1:-1]
617
+ phrase_token_map_len = len(phrase_token_map)
618
+ phrase_token_map_str = " ".join(phrase_token_map)
619
+
620
+ if verbose:
621
+ logger.info(
622
+ "Full str:",
623
+ token_map_str,
624
+ "Substr:",
625
+ phrase_token_map_str,
626
+ "Phrase:",
627
+ phrases,
628
+ )
629
+
630
+ # Count the number of tokens before the matched substring
631
+ # The prefix before the substring ends with a separator space, which we drop by subtracting one from the index.
632
+ obj_first_index = len(token_map_str[: token_map_str.index(phrase_token_map_str) - 1].split(" "))
633
+
634
+ obj_position = list(range(obj_first_index, obj_first_index + phrase_token_map_len))
635
+ phrase_indices.append(obj_position)
636
+
637
+ if add_suffix_if_not_found:
638
+ return phrase_indices, prompt
639
+
640
+ return phrase_indices
641
+
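+ # Example (illustrative): for prompt = "a cat on a table" and phrases = ["cat", "table"],
+ # the returned indices would be roughly [[2], [5]] (positions are offset by the <bos> token);
+ # phrases absent from the prompt are first appended after a "|" separator.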
642
+ def add_ca_loss_per_attn_map_to_loss(
643
+ self,
644
+ loss,
645
+ attn_map,
646
+ object_number,
647
+ bboxes,
648
+ phrase_indices,
649
+ fg_top_p=0.2,
650
+ bg_top_p=0.2,
651
+ fg_weight=1.0,
652
+ bg_weight=1.0,
653
+ ):
654
+ # b is the number of heads, not batch
655
+ b, i, j = attn_map.shape
656
+ H = W = int(math.sqrt(i))
657
+ for obj_idx in range(object_number):
658
+ obj_loss = 0
659
+ mask = torch.zeros(size=(H, W), device="cuda")
660
+ obj_boxes = bboxes[obj_idx]
661
+
662
+ # We support two-level inputs (one box per phrase) and three-level inputs (multiple boxes per phrase)
663
+ if not isinstance(obj_boxes[0], Iterable):
664
+ obj_boxes = [obj_boxes]
665
+
666
+ for obj_box in obj_boxes:
667
+ # x_min, y_min, x_max, y_max = int(obj_box[0] * W), int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H)
668
+ x_min, y_min, x_max, y_max = scale_proportion(obj_box, H=H, W=W)
669
+ mask[y_min:y_max, x_min:x_max] = 1
670
+
671
+ for obj_position in phrase_indices[obj_idx]:
672
+ # Could potentially optimize to compute this for loop in batch.
673
+ # Could crop the ref cross attention before saving to save memory.
674
+
675
+ ca_map_obj = attn_map[:, :, obj_position].reshape(b, H, W)
676
+
677
+ # shape: (b, H * W)
678
+ ca_map_obj = attn_map[:, :, obj_position] # .reshape(b, H, W)
679
+ k_fg = (mask.sum() * fg_top_p).long().clamp_(min=1)
680
+ k_bg = ((1 - mask).sum() * bg_top_p).long().clamp_(min=1)
681
+
682
+ mask_1d = mask.view(1, -1)
683
+
684
+ # Max-based loss function
685
+
686
+ # Take the topk over spatial dimension, and then take the sum over heads dim
687
+ # The mean is over k_fg and k_bg dimension, so we don't need to sum and divide on our own.
688
+ obj_loss += (1 - (ca_map_obj * mask_1d).topk(k=k_fg).values.mean(dim=1)).sum(dim=0) * fg_weight
689
+ obj_loss += ((ca_map_obj * (1 - mask_1d)).topk(k=k_bg).values.mean(dim=1)).sum(dim=0) * bg_weight
690
+
691
+ loss += obj_loss / len(phrase_indices[obj_idx])
692
+
693
+ return loss
694
+
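+ # Toy walkthrough (illustrative): with H = W = 8 and a box covering the top-left quarter,
+ # mask.sum() = 16, so k_fg = int(16 * 0.2) = 3 and k_bg = int(48 * 0.2) = 9. The foreground term
+ # pushes the mean of the top-3 attention values inside the box towards 1, while the background
+ # term pushes the mean of the top-9 values outside the box towards 0.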
695
+ def compute_ca_loss(
696
+ self,
697
+ saved_attn,
698
+ bboxes,
699
+ phrase_indices,
700
+ guidance_attn_keys,
701
+ verbose=False,
702
+ **kwargs,
703
+ ):
704
+ """
705
+ The `saved_attn` is supposed to be passed to `save_attn_to_dict` in `cross_attention_kwargs` prior to computing this loss.
706
+ `AttnProcessor` will put attention maps into the `save_attn_to_dict`.
707
+
708
+ `index` is the timestep.
709
+ `ref_ca_word_token_only`: This has precedence over `ref_ca_last_token_only` (i.e., if both are enabled, we take the token from word rather than the last token).
710
+ `ref_ca_last_token_only`: `ref_ca_saved_attn` comes from the attention map of the last token of the phrase in single object generation, so we apply it only to the last token of the phrase in overall generation if this is set to True. If set to False, `ref_ca_saved_attn` will be applied to all the text tokens.
711
+ """
712
+ loss = torch.tensor(0).float().cuda()
713
+ object_number = len(bboxes)
714
+ if object_number == 0:
715
+ return loss
716
+
717
+ for attn_key in guidance_attn_keys:
718
+ # We only have 1 cross attention for mid.
719
+
720
+ attn_map_integrated = saved_attn[attn_key]
721
+ if not attn_map_integrated.is_cuda:
722
+ attn_map_integrated = attn_map_integrated.cuda()
723
+ # Example dimension: [20, 64, 77]
724
+ attn_map = attn_map_integrated.squeeze(dim=0)
725
+
726
+ loss = self.add_ca_loss_per_attn_map_to_loss(
727
+ loss, attn_map, object_number, bboxes, phrase_indices, **kwargs
728
+ )
729
+
730
+ num_attn = len(guidance_attn_keys)
731
+
732
+ if num_attn > 0:
733
+ loss = loss / (object_number * num_attn)
734
+
735
+ return loss
736
+
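+ # Note: the loss above is summed over the guided attention layers and then divided by
+ # `object_number * num_attn`, so its magnitude is roughly independent of how many boxes
+ # and attention keys are being guided.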
737
+ @torch.no_grad()
738
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
739
+ def __call__(
740
+ self,
741
+ prompt: Union[str, List[str]] = None,
742
+ height: Optional[int] = None,
743
+ width: Optional[int] = None,
744
+ num_inference_steps: int = 50,
745
+ guidance_scale: float = 7.5,
746
+ gligen_scheduled_sampling_beta: float = 0.3,
747
+ phrases: List[str] = None,
748
+ boxes: List[List[float]] = None,
749
+ negative_prompt: Optional[Union[str, List[str]]] = None,
750
+ num_images_per_prompt: Optional[int] = 1,
751
+ eta: float = 0.0,
752
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
753
+ latents: Optional[torch.FloatTensor] = None,
754
+ prompt_embeds: Optional[torch.FloatTensor] = None,
755
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
756
+ ip_adapter_image: Optional[PipelineImageInput] = None,
757
+ output_type: Optional[str] = "pil",
758
+ return_dict: bool = True,
759
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
760
+ callback_steps: int = 1,
761
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
762
+ clip_skip: Optional[int] = None,
763
+ lmd_guidance_kwargs: Optional[Dict[str, Any]] = {},
764
+ phrase_indices: Optional[List[int]] = None,
765
+ ):
766
+ r"""
767
+ The call function to the pipeline for generation.
768
+
769
+ Args:
770
+ prompt (`str` or `List[str]`, *optional*):
771
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
772
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
773
+ The height in pixels of the generated image.
774
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
775
+ The width in pixels of the generated image.
776
+ num_inference_steps (`int`, *optional*, defaults to 50):
777
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
778
+ expense of slower inference.
779
+ guidance_scale (`float`, *optional*, defaults to 7.5):
780
+ A higher guidance scale value encourages the model to generate images closely linked to the text
781
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
782
+ phrases (`List[str]`):
783
+ The phrases to guide what to include in each of the regions defined by the corresponding
784
+ `boxes`. There should only be one phrase per bounding box.
785
+ boxes (`List[List[float]]`):
786
+ The bounding boxes that identify rectangular regions of the image that are going to be filled with the
787
+ content described by the corresponding `phrases`. Each rectangular box is defined as a
788
+ `List[float]` of 4 elements `[xmin, ymin, xmax, ymax]` where each value is between [0,1].
789
+ gligen_scheduled_sampling_beta (`float`, defaults to 0.3):
790
+ Scheduled Sampling factor from [GLIGEN: Open-Set Grounded Text-to-Image
791
+ Generation](https://arxiv.org/pdf/2301.07093.pdf). Scheduled Sampling factor is only varied for
792
+ scheduled sampling during inference for improved quality and controllability.
793
+ negative_prompt (`str` or `List[str]`, *optional*):
794
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
795
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
796
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
797
+ The number of images to generate per prompt.
798
+ eta (`float`, *optional*, defaults to 0.0):
799
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
800
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
801
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
802
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
803
+ generation deterministic.
804
+ latents (`torch.FloatTensor`, *optional*):
805
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
806
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
807
+ tensor is generated by sampling using the supplied random `generator`.
808
+ prompt_embeds (`torch.FloatTensor`, *optional*):
809
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
810
+ provided, text embeddings are generated from the `prompt` input argument.
811
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
812
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
813
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
814
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
815
+ output_type (`str`, *optional*, defaults to `"pil"`):
816
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
817
+ return_dict (`bool`, *optional*, defaults to `True`):
818
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
819
+ plain tuple.
820
+ callback (`Callable`, *optional*):
821
+ A function that is called every `callback_steps` steps during inference. The function is called with the
822
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
823
+ callback_steps (`int`, *optional*, defaults to 1):
824
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
825
+ every step.
826
+ cross_attention_kwargs (`dict`, *optional*):
827
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
828
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
829
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
830
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
831
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
832
+ using zero terminal SNR.
833
+ clip_skip (`int`, *optional*):
834
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
835
+ the output of the pre-final layer will be used for computing the prompt embeddings.
836
+ lmd_guidance_kwargs (`dict`, *optional*):
837
+ A kwargs dictionary that, if specified, is passed along to the `latent_lmd_guidance` function. Useful keys
+ include `loss_scale` (the guidance strength), `loss_threshold` (when the loss is lower than this value, the
+ guidance is not applied anymore), `max_iter` (the number of iterations of guidance for each step), and
+ `guidance_timesteps` (the number of diffusion timesteps to apply guidance on). See `latent_lmd_guidance`
+ for implementation details.
838
+ phrase_indices (`list` of `list`, *optional*):
+ The indices of the tokens of each phrase in the overall prompt. If omitted, the pipeline will match the
+ first token subsequence and will append any missing phrases to the end of the prompt by default.
839
+ Examples:
840
+
841
+ Returns:
842
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
843
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
844
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
845
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
846
+ "not-safe-for-work" (nsfw) content.
847
+ """
848
+ # 0. Default height and width to unet
849
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
850
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
851
+
852
+ # 1. Check inputs. Raise error if not correct
853
+ self.check_inputs(
854
+ prompt,
855
+ height,
856
+ width,
857
+ callback_steps,
858
+ phrases,
859
+ boxes,
860
+ negative_prompt,
861
+ prompt_embeds,
862
+ negative_prompt_embeds,
863
+ phrase_indices,
864
+ )
865
+
866
+ # 2. Define call parameters
867
+ if prompt is not None and isinstance(prompt, str):
868
+ batch_size = 1
869
+ if phrase_indices is None:
870
+ phrase_indices, prompt = self.get_phrase_indices(prompt, phrases, add_suffix_if_not_found=True)
871
+ elif prompt is not None and isinstance(prompt, list):
872
+ batch_size = len(prompt)
873
+ if phrase_indices is None:
874
+ phrase_indices = []
875
+ prompt_parsed = []
876
+ for prompt_item in prompt:
877
+ (
878
+ phrase_indices_parsed_item,
879
+ prompt_parsed_item,
880
+ ) = self.get_phrase_indices(prompt_item, add_suffix_if_not_found=True)
881
+ phrase_indices.append(phrase_indices_parsed_item)
882
+ prompt_parsed.append(prompt_parsed_item)
883
+ prompt = prompt_parsed
884
+ else:
885
+ batch_size = prompt_embeds.shape[0]
886
+
887
+ device = self._execution_device
888
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
889
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
890
+ # corresponds to doing no classifier free guidance.
891
+ do_classifier_free_guidance = guidance_scale > 1.0
892
+
893
+ # 3. Encode input prompt
894
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
895
+ prompt,
896
+ device,
897
+ num_images_per_prompt,
898
+ do_classifier_free_guidance,
899
+ negative_prompt,
900
+ prompt_embeds=prompt_embeds,
901
+ negative_prompt_embeds=negative_prompt_embeds,
902
+ clip_skip=clip_skip,
903
+ )
904
+
905
+ cond_prompt_embeds = prompt_embeds
906
+
907
+ # For classifier free guidance, we need to do two forward passes.
908
+ # Here we concatenate the unconditional and text embeddings into a single batch
909
+ # to avoid doing two forward passes
910
+ if do_classifier_free_guidance:
911
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
912
+
913
+ if ip_adapter_image is not None:
914
+ image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
915
+ if do_classifier_free_guidance:
916
+ image_embeds = torch.cat([negative_image_embeds, image_embeds])
917
+
918
+ # 4. Prepare timesteps
919
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
920
+ timesteps = self.scheduler.timesteps
921
+
922
+ # 5. Prepare latent variables
923
+ num_channels_latents = self.unet.config.in_channels
924
+ latents = self.prepare_latents(
925
+ batch_size * num_images_per_prompt,
926
+ num_channels_latents,
927
+ height,
928
+ width,
929
+ prompt_embeds.dtype,
930
+ device,
931
+ generator,
932
+ latents,
933
+ )
934
+
935
+ # 5.1 Prepare GLIGEN variables
936
+ max_objs = 30
937
+ if len(boxes) > max_objs:
938
+ warnings.warn(
939
+ f"More than {max_objs} objects found. Only the first {max_objs} objects will be processed.",
940
+ FutureWarning,
941
+ )
942
+ phrases = phrases[:max_objs]
943
+ boxes = boxes[:max_objs]
944
+
945
+ n_objs = len(boxes)
946
+ if n_objs:
947
+ # prepare batched input to the PositionNet (boxes, phrases, mask)
948
+ # Get tokens for phrases from pre-trained CLIPTokenizer
949
+ tokenizer_inputs = self.tokenizer(phrases, padding=True, return_tensors="pt").to(device)
950
+ # For the token, we use the same pre-trained text encoder
951
+ # to obtain its text feature
952
+ _text_embeddings = self.text_encoder(**tokenizer_inputs).pooler_output
953
+
954
+ # Each entity described in `phrases` is denoted with a bounding box;
955
+ # we represent the location information as (xmin,ymin,xmax,ymax)
956
+ cond_boxes = torch.zeros(max_objs, 4, device=device, dtype=self.text_encoder.dtype)
957
+ if n_objs:
958
+ cond_boxes[:n_objs] = torch.tensor(boxes)
959
+ text_embeddings = torch.zeros(
960
+ max_objs,
961
+ self.unet.config.cross_attention_dim,
962
+ device=device,
963
+ dtype=self.text_encoder.dtype,
964
+ )
965
+ if n_objs:
966
+ text_embeddings[:n_objs] = _text_embeddings
967
+ # Generate a mask for each object, i.e., each entity described by a phrase
968
+ masks = torch.zeros(max_objs, device=device, dtype=self.text_encoder.dtype)
969
+ masks[:n_objs] = 1
970
+
971
+ repeat_batch = batch_size * num_images_per_prompt
972
+ cond_boxes = cond_boxes.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
973
+ text_embeddings = text_embeddings.unsqueeze(0).expand(repeat_batch, -1, -1).clone()
974
+ masks = masks.unsqueeze(0).expand(repeat_batch, -1).clone()
975
+ if do_classifier_free_guidance:
976
+ repeat_batch = repeat_batch * 2
977
+ cond_boxes = torch.cat([cond_boxes] * 2)
978
+ text_embeddings = torch.cat([text_embeddings] * 2)
979
+ masks = torch.cat([masks] * 2)
980
+ masks[: repeat_batch // 2] = 0
981
+ if cross_attention_kwargs is None:
982
+ cross_attention_kwargs = {}
983
+ cross_attention_kwargs["gligen"] = {
984
+ "boxes": cond_boxes,
985
+ "positive_embeddings": text_embeddings,
986
+ "masks": masks,
987
+ }
988
+
989
+ num_grounding_steps = int(gligen_scheduled_sampling_beta * len(timesteps))
990
+ self.enable_fuser(True)
991
+
992
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
993
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
994
+
995
+ # 6.1 Add image embeds for IP-Adapter
996
+ added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
997
+
998
+ loss_attn = torch.tensor(10000.0)
999
+
1000
+ # 7. Denoising loop
1001
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1002
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1003
+ for i, t in enumerate(timesteps):
1004
+ # Scheduled sampling
1005
+ if i == num_grounding_steps:
1006
+ self.enable_fuser(False)
1007
+
1008
+ if latents.shape[1] != 4:
1009
+ latents = torch.randn_like(latents[:, :4])
1010
+
1011
+ # 7.1 Perform LMD guidance
1012
+ if boxes:
1013
+ latents, loss_attn = self.latent_lmd_guidance(
1014
+ cond_prompt_embeds,
1015
+ index=i,
1016
+ boxes=boxes,
1017
+ phrase_indices=phrase_indices,
1018
+ t=t,
1019
+ latents=latents,
1020
+ loss=loss_attn,
1021
+ **lmd_guidance_kwargs,
1022
+ )
1023
+
1024
+ # expand the latents if we are doing classifier free guidance
1025
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1026
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1027
+
1028
+ # predict the noise residual
1029
+ noise_pred = self.unet(
1030
+ latent_model_input,
1031
+ t,
1032
+ encoder_hidden_states=prompt_embeds,
1033
+ cross_attention_kwargs=cross_attention_kwargs,
1034
+ added_cond_kwargs=added_cond_kwargs,
1035
+ ).sample
1036
+
1037
+ # perform guidance
1038
+ if do_classifier_free_guidance:
1039
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1040
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1041
+
1042
+ # compute the previous noisy sample x_t -> x_t-1
1043
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1044
+
1045
+ # call the callback, if provided
1046
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1047
+ progress_bar.update()
1048
+ if callback is not None and i % callback_steps == 0:
1049
+ step_idx = i // getattr(self.scheduler, "order", 1)
1050
+ callback(step_idx, t, latents)
1051
+
1052
+ if not output_type == "latent":
1053
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1054
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1055
+ else:
1056
+ image = latents
1057
+ has_nsfw_concept = None
1058
+
1059
+ if has_nsfw_concept is None:
1060
+ do_denormalize = [True] * image.shape[0]
1061
+ else:
1062
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1063
+
1064
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1065
+
1066
+ # Offload last model to CPU
1067
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1068
+ self.final_offload_hook.offload()
1069
+
1070
+ if not return_dict:
1071
+ return (image, has_nsfw_concept)
1072
+
1073
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1074
+
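+ # Usage sketch (illustrative; the checkpoint id and `llm_response` are assumptions):
+ #   pipe = DiffusionPipeline.from_pretrained(
+ #       "longlian/lmd_plus", custom_pipeline="llm_grounded_diffusion", torch_dtype=torch.float16
+ #   ).to("cuda")
+ #   phrases, boxes, bg_prompt, neg_prompt = pipe.parse_llm_response(llm_response)
+ #   images = pipe(
+ #       prompt=bg_prompt, negative_prompt=neg_prompt, phrases=phrases, boxes=boxes,
+ #       gligen_scheduled_sampling_beta=0.4, num_inference_steps=50,
+ #   ).images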
1075
+ @torch.set_grad_enabled(True)
1076
+ def latent_lmd_guidance(
1077
+ self,
1078
+ cond_embeddings,
1079
+ index,
1080
+ boxes,
1081
+ phrase_indices,
1082
+ t,
1083
+ latents,
1084
+ loss,
1085
+ *,
1086
+ loss_scale=20,
1087
+ loss_threshold=5.0,
1088
+ max_iter=[3] * 5 + [2] * 5 + [1] * 5,
1089
+ guidance_timesteps=15,
1090
+ cross_attention_kwargs=None,
1091
+ guidance_attn_keys=DEFAULT_GUIDANCE_ATTN_KEYS,
1092
+ verbose=False,
1093
+ clear_cache=False,
1094
+ unet_additional_kwargs={},
1095
+ guidance_callback=None,
1096
+ **kwargs,
1097
+ ):
1098
+ scheduler, unet = self.scheduler, self.unet
1099
+
1100
+ iteration = 0
1101
+
1102
+ if index < guidance_timesteps:
1103
+ if isinstance(max_iter, list):
1104
+ max_iter = max_iter[index]
1105
+
1106
+ if verbose:
1107
+ logger.info(
1108
+ f"time index {index}, loss: {loss.item()/loss_scale:.3f} (de-scaled with scale {loss_scale:.1f}), loss threshold: {loss_threshold:.3f}"
1109
+ )
1110
+
1111
+ try:
1112
+ self.enable_attn_hook(enabled=True)
1113
+
1114
+ while (
1115
+ loss.item() / loss_scale > loss_threshold and iteration < max_iter and index < guidance_timesteps
1116
+ ):
1117
+ self._saved_attn = {}
1118
+
1119
+ latents.requires_grad_(True)
1120
+ latent_model_input = latents
1121
+ latent_model_input = scheduler.scale_model_input(latent_model_input, t)
1122
+
1123
+ unet(
1124
+ latent_model_input,
1125
+ t,
1126
+ encoder_hidden_states=cond_embeddings,
1127
+ cross_attention_kwargs=cross_attention_kwargs,
1128
+ **unet_additional_kwargs,
1129
+ )
1130
+
1131
+ # update latents with guidance
1132
+ loss = (
1133
+ self.compute_ca_loss(
1134
+ saved_attn=self._saved_attn,
1135
+ bboxes=boxes,
1136
+ phrase_indices=phrase_indices,
1137
+ guidance_attn_keys=guidance_attn_keys,
1138
+ verbose=verbose,
1139
+ **kwargs,
1140
+ )
1141
+ * loss_scale
1142
+ )
1143
+
1144
+ if torch.isnan(loss):
1145
+ raise RuntimeError("**Loss is NaN**")
1146
+
1147
+ # This callback allows visualizations.
1148
+ if guidance_callback is not None:
1149
+ guidance_callback(self, latents, loss, iteration, index)
1150
+
1151
+ self._saved_attn = None
1152
+
1153
+ grad_cond = torch.autograd.grad(loss.requires_grad_(True), [latents])[0]
1154
+
1155
+ latents.requires_grad_(False)
1156
+
1157
+ # Scaling with classifier guidance
1158
+ alpha_prod_t = scheduler.alphas_cumprod[t]
1159
+ # Classifier guidance: https://arxiv.org/pdf/2105.05233.pdf
1160
+ # DDIM: https://arxiv.org/pdf/2010.02502.pdf
1161
+ scale = (1 - alpha_prod_t) ** (0.5)
1162
+ latents = latents - scale * grad_cond
1163
+
1164
+ iteration += 1
1165
+
1166
+ if clear_cache:
1167
+ gc.collect()
1168
+ torch.cuda.empty_cache()
1169
+
1170
+ if verbose:
1171
+ logger.info(
1172
+ f"time index {index}, loss: {loss.item()/loss_scale:.3f}, loss threshold: {loss_threshold:.3f}, iteration: {iteration}"
1173
+ )
1174
+
1175
+ finally:
1176
+ self.enable_attn_hook(enabled=False)
1177
+
1178
+ return latents, loss
1179
+
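+ # Sketch of the update performed above (for reference): during the first `guidance_timesteps`
+ # denoising steps, the latents are refined up to `max_iter[index]` times using
+ #   latents <- latents - sqrt(1 - alpha_bar_t) * d(loss_scale * ca_loss) / d(latents)
+ # which mirrors the classifier-guidance scaling referenced in the comments above.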
1180
+ # Below are methods copied from StableDiffusionPipeline
1181
+ # The design choice of not inheriting from StableDiffusionPipeline is discussed here: https://github.com/huggingface/diffusers/pull/5993#issuecomment-1834258517
1182
+
1183
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
1184
+ def enable_vae_slicing(self):
1185
+ r"""
1186
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
1187
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
1188
+ """
1189
+ self.vae.enable_slicing()
1190
+
1191
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
1192
+ def disable_vae_slicing(self):
1193
+ r"""
1194
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
1195
+ computing decoding in one step.
1196
+ """
1197
+ self.vae.disable_slicing()
1198
+
1199
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
1200
+ def enable_vae_tiling(self):
1201
+ r"""
1202
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
1203
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
1204
+ processing larger images.
1205
+ """
1206
+ self.vae.enable_tiling()
1207
+
1208
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
1209
+ def disable_vae_tiling(self):
1210
+ r"""
1211
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
1212
+ computing decoding in one step.
1213
+ """
1214
+ self.vae.disable_tiling()
1215
+
1216
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
1217
+ def _encode_prompt(
1218
+ self,
1219
+ prompt,
1220
+ device,
1221
+ num_images_per_prompt,
1222
+ do_classifier_free_guidance,
1223
+ negative_prompt=None,
1224
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1225
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1226
+ lora_scale: Optional[float] = None,
1227
+ **kwargs,
1228
+ ):
1229
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
1230
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
1231
+
1232
+ prompt_embeds_tuple = self.encode_prompt(
1233
+ prompt=prompt,
1234
+ device=device,
1235
+ num_images_per_prompt=num_images_per_prompt,
1236
+ do_classifier_free_guidance=do_classifier_free_guidance,
1237
+ negative_prompt=negative_prompt,
1238
+ prompt_embeds=prompt_embeds,
1239
+ negative_prompt_embeds=negative_prompt_embeds,
1240
+ lora_scale=lora_scale,
1241
+ **kwargs,
1242
+ )
1243
+
1244
+ # concatenate for backwards compatibility
1245
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
1246
+
1247
+ return prompt_embeds
1248
+
1249
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
1250
+ def encode_prompt(
1251
+ self,
1252
+ prompt,
1253
+ device,
1254
+ num_images_per_prompt,
1255
+ do_classifier_free_guidance,
1256
+ negative_prompt=None,
1257
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1258
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1259
+ lora_scale: Optional[float] = None,
1260
+ clip_skip: Optional[int] = None,
1261
+ ):
1262
+ r"""
1263
+ Encodes the prompt into text encoder hidden states.
1264
+
1265
+ Args:
1266
+ prompt (`str` or `List[str]`, *optional*):
1267
+ prompt to be encoded
1268
+ device: (`torch.device`):
1269
+ torch device
1270
+ num_images_per_prompt (`int`):
1271
+ number of images that should be generated per prompt
1272
+ do_classifier_free_guidance (`bool`):
1273
+ whether to use classifier free guidance or not
1274
+ negative_prompt (`str` or `List[str]`, *optional*):
1275
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1276
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1277
+ less than `1`).
1278
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1279
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1280
+ provided, text embeddings will be generated from `prompt` input argument.
1281
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1282
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1283
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1284
+ argument.
1285
+ lora_scale (`float`, *optional*):
1286
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
1287
+ clip_skip (`int`, *optional*):
1288
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1289
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1290
+ """
1291
+ # set lora scale so that monkey patched LoRA
1292
+ # function of text encoder can correctly access it
1293
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
1294
+ self._lora_scale = lora_scale
1295
+
1296
+ # dynamically adjust the LoRA scale
1297
+ if not USE_PEFT_BACKEND:
1298
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
1299
+ else:
1300
+ scale_lora_layers(self.text_encoder, lora_scale)
1301
+
1302
+ if prompt is not None and isinstance(prompt, str):
1303
+ batch_size = 1
1304
+ elif prompt is not None and isinstance(prompt, list):
1305
+ batch_size = len(prompt)
1306
+ else:
1307
+ batch_size = prompt_embeds.shape[0]
1308
+
1309
+ if prompt_embeds is None:
1310
+ # textual inversion: process multi-vector tokens if necessary
1311
+ if isinstance(self, TextualInversionLoaderMixin):
1312
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
1313
+
1314
+ text_inputs = self.tokenizer(
1315
+ prompt,
1316
+ padding="max_length",
1317
+ max_length=self.tokenizer.model_max_length,
1318
+ truncation=True,
1319
+ return_tensors="pt",
1320
+ )
1321
+ text_input_ids = text_inputs.input_ids
1322
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
1323
+
1324
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
1325
+ text_input_ids, untruncated_ids
1326
+ ):
1327
+ removed_text = self.tokenizer.batch_decode(
1328
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
1329
+ )
1330
+ logger.warning(
1331
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
1332
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
1333
+ )
1334
+
1335
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
1336
+ attention_mask = text_inputs.attention_mask.to(device)
1337
+ else:
1338
+ attention_mask = None
1339
+
1340
+ if clip_skip is None:
1341
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
1342
+ prompt_embeds = prompt_embeds[0]
1343
+ else:
1344
+ prompt_embeds = self.text_encoder(
1345
+ text_input_ids.to(device),
1346
+ attention_mask=attention_mask,
1347
+ output_hidden_states=True,
1348
+ )
1349
+ # Access the `hidden_states` first, which contains a tuple of
1350
+ # all the hidden states from the encoder layers. Then index into
1351
+ # the tuple to access the hidden states from the desired layer.
1352
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
1353
+ # We also need to apply the final LayerNorm here to not mess with the
1354
+ # representations. The `last_hidden_states` that we typically use for
1355
+ # obtaining the final prompt representations passes through the LayerNorm
1356
+ # layer.
1357
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
1358
+
1359
+ if self.text_encoder is not None:
1360
+ prompt_embeds_dtype = self.text_encoder.dtype
1361
+ elif self.unet is not None:
1362
+ prompt_embeds_dtype = self.unet.dtype
1363
+ else:
1364
+ prompt_embeds_dtype = prompt_embeds.dtype
1365
+
1366
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
1367
+
1368
+ bs_embed, seq_len, _ = prompt_embeds.shape
1369
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
1370
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
1371
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
1372
+
1373
+ # get unconditional embeddings for classifier free guidance
1374
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
1375
+ uncond_tokens: List[str]
1376
+ if negative_prompt is None:
1377
+ uncond_tokens = [""] * batch_size
1378
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
1379
+ raise TypeError(
1380
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
1381
+ f" {type(prompt)}."
1382
+ )
1383
+ elif isinstance(negative_prompt, str):
1384
+ uncond_tokens = [negative_prompt]
1385
+ elif batch_size != len(negative_prompt):
1386
+ raise ValueError(
1387
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
1388
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
1389
+ " the batch size of `prompt`."
1390
+ )
1391
+ else:
1392
+ uncond_tokens = negative_prompt
1393
+
1394
+ # textual inversion: process multi-vector tokens if necessary
1395
+ if isinstance(self, TextualInversionLoaderMixin):
1396
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
1397
+
1398
+ max_length = prompt_embeds.shape[1]
1399
+ uncond_input = self.tokenizer(
1400
+ uncond_tokens,
1401
+ padding="max_length",
1402
+ max_length=max_length,
1403
+ truncation=True,
1404
+ return_tensors="pt",
1405
+ )
1406
+
1407
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
1408
+ attention_mask = uncond_input.attention_mask.to(device)
1409
+ else:
1410
+ attention_mask = None
1411
+
1412
+ negative_prompt_embeds = self.text_encoder(
1413
+ uncond_input.input_ids.to(device),
1414
+ attention_mask=attention_mask,
1415
+ )
1416
+ negative_prompt_embeds = negative_prompt_embeds[0]
1417
+
1418
+ if do_classifier_free_guidance:
1419
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
1420
+ seq_len = negative_prompt_embeds.shape[1]
1421
+
1422
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
1423
+
1424
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
1425
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
1426
+
1427
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
1428
+ # Retrieve the original scale by scaling back the LoRA layers
1429
+ unscale_lora_layers(self.text_encoder, lora_scale)
1430
+
1431
+ return prompt_embeds, negative_prompt_embeds
1432
+
1433
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
1434
+ def encode_image(self, image, device, num_images_per_prompt):
1435
+ dtype = next(self.image_encoder.parameters()).dtype
1436
+
1437
+ if not isinstance(image, torch.Tensor):
1438
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
1439
+
1440
+ image = image.to(device=device, dtype=dtype)
1441
+ image_embeds = self.image_encoder(image).image_embeds
1442
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
1443
+
1444
+ uncond_image_embeds = torch.zeros_like(image_embeds)
1445
+ return image_embeds, uncond_image_embeds
1446
+
1447
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
1448
+ def run_safety_checker(self, image, device, dtype):
1449
+ if self.safety_checker is None:
1450
+ has_nsfw_concept = None
1451
+ else:
1452
+ if torch.is_tensor(image):
1453
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
1454
+ else:
1455
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
1456
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
1457
+ image, has_nsfw_concept = self.safety_checker(
1458
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
1459
+ )
1460
+ return image, has_nsfw_concept
1461
+
1462
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
1463
+ def decode_latents(self, latents):
1464
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
1465
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
1466
+
1467
+ latents = 1 / self.vae.config.scaling_factor * latents
1468
+ image = self.vae.decode(latents, return_dict=False)[0]
1469
+ image = (image / 2 + 0.5).clamp(0, 1)
1470
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
1471
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
1472
+ return image
1473
+
1474
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
1475
+ def prepare_extra_step_kwargs(self, generator, eta):
1476
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
1477
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
1478
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
1479
+ # and should be between [0, 1]
1480
+
1481
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
1482
+ extra_step_kwargs = {}
1483
+ if accepts_eta:
1484
+ extra_step_kwargs["eta"] = eta
1485
+
1486
+ # check if the scheduler accepts generator
1487
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
1488
+ if accepts_generator:
1489
+ extra_step_kwargs["generator"] = generator
1490
+ return extra_step_kwargs
1491
+
1492
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
1493
+ def prepare_latents(
1494
+ self,
1495
+ batch_size,
1496
+ num_channels_latents,
1497
+ height,
1498
+ width,
1499
+ dtype,
1500
+ device,
1501
+ generator,
1502
+ latents=None,
1503
+ ):
1504
+ shape = (
1505
+ batch_size,
1506
+ num_channels_latents,
1507
+ height // self.vae_scale_factor,
1508
+ width // self.vae_scale_factor,
1509
+ )
1510
+ if isinstance(generator, list) and len(generator) != batch_size:
1511
+ raise ValueError(
1512
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
1513
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
1514
+ )
1515
+
1516
+ if latents is None:
1517
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
1518
+ else:
1519
+ latents = latents.to(device)
1520
+
1521
+ # scale the initial noise by the standard deviation required by the scheduler
1522
+ latents = latents * self.scheduler.init_noise_sigma
1523
+ return latents
1524
+
1525
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
1526
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
1527
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
1528
+
1529
+ The suffixes after the scaling factors represent the stages where they are being applied.
1530
+
1531
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
1532
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
1533
+
1534
+ Args:
1535
+ s1 (`float`):
1536
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
1537
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1538
+ s2 (`float`):
1539
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
1540
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1541
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
1542
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
1543
+ """
1544
+ if not hasattr(self, "unet"):
1545
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
1546
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
1547
+
1548
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
1549
+ def disable_freeu(self):
1550
+ """Disables the FreeU mechanism if enabled."""
1551
+ self.unet.disable_freeu()
1552
+
1553
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
1554
+ def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
1555
+ """
1556
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
1557
+
1558
+ Args:
1559
+ w (`torch.Tensor`):
1560
+ guidance scale values at which to generate the embedding vectors
1561
+ embedding_dim (`int`, *optional*, defaults to 512):
1562
+ dimension of the embeddings to generate
1563
+ dtype:
1564
+ data type of the generated embeddings
1565
+
1566
+ Returns:
1567
+ `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)`
1568
+ """
1569
+ assert len(w.shape) == 1
1570
+ w = w * 1000.0
1571
+
1572
+ half_dim = embedding_dim // 2
1573
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
1574
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
1575
+ emb = w.to(dtype)[:, None] * emb[None, :]
1576
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
1577
+ if embedding_dim % 2 == 1: # zero pad
1578
+ emb = torch.nn.functional.pad(emb, (0, 1))
1579
+ assert emb.shape == (w.shape[0], embedding_dim)
1580
+ return emb
1581
+
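+ # Example (illustrative): get_guidance_scale_embedding(torch.tensor([7.5]), embedding_dim=512)
+ # returns a (1, 512) tensor whose first half holds sin features and second half cos features of
+ # w * 1000; this embedding is typically only consumed by UNets that expose `time_cond_proj_dim`.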
1582
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_scale
1583
+ @property
1584
+ def guidance_scale(self):
1585
+ return self._guidance_scale
1586
+
1587
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.guidance_rescale
1588
+ @property
1589
+ def guidance_rescale(self):
1590
+ return self._guidance_rescale
1591
+
1592
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.clip_skip
1593
+ @property
1594
+ def clip_skip(self):
1595
+ return self._clip_skip
1596
+
1597
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1598
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1599
+ # corresponds to doing no classifier free guidance.
1600
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.do_classifier_free_guidance
1601
+ @property
1602
+ def do_classifier_free_guidance(self):
1603
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
1604
+
1605
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.cross_attention_kwargs
1606
+ @property
1607
+ def cross_attention_kwargs(self):
1608
+ return self._cross_attention_kwargs
1609
+
1610
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.num_timesteps
1611
+ @property
1612
+ def num_timesteps(self):
1613
+ return self._num_timesteps
v0.26.3/lpw_stable_diffusion.py ADDED
@@ -0,0 +1,1471 @@
1
+ import inspect
2
+ import re
3
+ from typing import Any, Callable, Dict, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from packaging import version
9
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
10
+
11
+ from diffusers import DiffusionPipeline
12
+ from diffusers.configuration_utils import FrozenDict
13
+ from diffusers.image_processor import VaeImageProcessor
14
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
15
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
16
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
17
+ from diffusers.schedulers import KarrasDiffusionSchedulers
18
+ from diffusers.utils import (
19
+ PIL_INTERPOLATION,
20
+ deprecate,
21
+ is_accelerate_available,
22
+ is_accelerate_version,
23
+ logging,
24
+ )
25
+ from diffusers.utils.torch_utils import randn_tensor
26
+
27
+
28
+ # ------------------------------------------------------------------------------
29
+
30
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
31
+
32
+ re_attention = re.compile(
33
+ r"""
34
+ \\\(|
35
+ \\\)|
36
+ \\\[|
37
+ \\]|
38
+ \\\\|
39
+ \\|
40
+ \(|
41
+ \[|
42
+ :([+-]?[.\d]+)\)|
43
+ \)|
44
+ ]|
45
+ [^\\()\[\]:]+|
46
+ :
47
+ """,
48
+ re.X,
49
+ )
50
+
51
+
52
+ def parse_prompt_attention(text):
53
+ """
54
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
55
+ Accepted tokens are:
56
+ (abc) - increases attention to abc by a multiplier of 1.1
57
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
58
+ [abc] - decreases attention to abc by a multiplier of 1.1
59
+ \\( - literal character '('
60
+ \\[ - literal character '['
61
+ \\) - literal character ')'
62
+ \\] - literal character ']'
63
+ \\ - literal character '\'
64
+ anything else - just text
65
+ >>> parse_prompt_attention('normal text')
66
+ [['normal text', 1.0]]
67
+ >>> parse_prompt_attention('an (important) word')
68
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
69
+ >>> parse_prompt_attention('(unbalanced')
70
+ [['unbalanced', 1.1]]
71
+ >>> parse_prompt_attention('\\(literal\\]')
72
+ [['(literal]', 1.0]]
73
+ >>> parse_prompt_attention('(unnecessary)(parens)')
74
+ [['unnecessaryparens', 1.1]]
75
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
76
+ [['a ', 1.0],
77
+ ['house', 1.5730000000000004],
78
+ [' ', 1.1],
79
+ ['on', 1.0],
80
+ [' a ', 1.1],
81
+ ['hill', 0.55],
82
+ [', sun, ', 1.1],
83
+ ['sky', 1.4641000000000006],
84
+ ['.', 1.1]]
85
+ """
86
+
87
+ res = []
88
+ round_brackets = []
89
+ square_brackets = []
90
+
91
+ round_bracket_multiplier = 1.1
92
+ square_bracket_multiplier = 1 / 1.1
93
+
94
+ def multiply_range(start_position, multiplier):
95
+ for p in range(start_position, len(res)):
96
+ res[p][1] *= multiplier
97
+
98
+ for m in re_attention.finditer(text):
99
+ text = m.group(0)
100
+ weight = m.group(1)
101
+
102
+ if text.startswith("\\"):
103
+ res.append([text[1:], 1.0])
104
+ elif text == "(":
105
+ round_brackets.append(len(res))
106
+ elif text == "[":
107
+ square_brackets.append(len(res))
108
+ elif weight is not None and len(round_brackets) > 0:
109
+ multiply_range(round_brackets.pop(), float(weight))
110
+ elif text == ")" and len(round_brackets) > 0:
111
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
112
+ elif text == "]" and len(square_brackets) > 0:
113
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
114
+ else:
115
+ res.append([text, 1.0])
116
+
117
+ for pos in round_brackets:
118
+ multiply_range(pos, round_bracket_multiplier)
119
+
120
+ for pos in square_brackets:
121
+ multiply_range(pos, square_bracket_multiplier)
122
+
123
+ if len(res) == 0:
124
+ res = [["", 1.0]]
125
+
126
+ # merge runs of identical weights
127
+ i = 0
128
+ while i + 1 < len(res):
129
+ if res[i][1] == res[i + 1][1]:
130
+ res[i][0] += res[i + 1][0]
131
+ res.pop(i + 1)
132
+ else:
133
+ i += 1
134
+
135
+ return res
136
+
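+ # Illustrative usage sketch (not part of the original file): nested brackets multiply, so a
+ # doubly wrapped word such as "((beautiful))" ends up with weight 1.1 * 1.1 = 1.21, while
+ # "[dark]" is damped by 1 / 1.1.
+ def _example_parse_prompt_attention():
+     parsed = parse_prompt_attention("a ((beautiful)) landscape, [dark]")
+     # -> [['a ', 1.0], ['beautiful', ~1.21], [' landscape, ', 1.0], ['dark', ~0.909]]
+     return parsed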
137
+
138
+ def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
139
+ r"""
140
+ Tokenize a list of prompts and return its tokens with weights of each token.
141
+
142
+ No padding, starting or ending token is included.
143
+ """
144
+ tokens = []
145
+ weights = []
146
+ truncated = False
147
+ for text in prompt:
148
+ texts_and_weights = parse_prompt_attention(text)
149
+ text_token = []
150
+ text_weight = []
151
+ for word, weight in texts_and_weights:
152
+ # tokenize and discard the starting and the ending token
153
+ token = pipe.tokenizer(word).input_ids[1:-1]
154
+ text_token += token
155
+ # copy the weight by length of token
156
+ text_weight += [weight] * len(token)
157
+ # stop if the text is too long (longer than truncation limit)
158
+ if len(text_token) > max_length:
159
+ truncated = True
160
+ break
161
+ # truncate
162
+ if len(text_token) > max_length:
163
+ truncated = True
164
+ text_token = text_token[:max_length]
165
+ text_weight = text_weight[:max_length]
166
+ tokens.append(text_token)
167
+ weights.append(text_weight)
168
+ if truncated:
169
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
170
+ return tokens, weights
171
+
172
+
173
+ def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
174
+ r"""
175
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
176
+ """
177
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
178
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
179
+ for i in range(len(tokens)):
180
+ tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
181
+ if no_boseos_middle:
182
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
183
+ else:
184
+ w = []
185
+ if len(weights[i]) == 0:
186
+ w = [1.0] * weights_length
187
+ else:
188
+ for j in range(max_embeddings_multiples):
189
+ w.append(1.0) # weight for starting token in this chunk
190
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
191
+ w.append(1.0) # weight for ending token in this chunk
192
+ w += [1.0] * (weights_length - len(w))
193
+ weights[i] = w[:]
194
+
195
+ return tokens, weights
196
+
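+ # Illustrative note (not part of the original file): with `chunk_length=77` and
+ # `max_embeddings_multiples=3`, `max_length = 2 + 3 * 75 = 227`. Each prompt is laid out as
+ # `[BOS] + tokens + [PAD] * (max_length - 2 - len(tokens)) + [EOS]`, and the weights are padded
+ # with 1.0 so that the BOS, EOS and PAD positions stay unweighted.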
197
+
198
+ def get_unweighted_text_embeddings(
199
+ pipe: DiffusionPipeline,
200
+ text_input: torch.Tensor,
201
+ chunk_length: int,
202
+ no_boseos_middle: Optional[bool] = True,
203
+ ):
204
+ """
205
+ When the length of tokens is a multiple of the capacity of the text encoder,
206
+ it should be split into chunks and sent to the text encoder individually.
207
+ """
208
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
209
+ if max_embeddings_multiples > 1:
210
+ text_embeddings = []
211
+ for i in range(max_embeddings_multiples):
212
+ # extract the i-th chunk
213
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()
214
+
215
+ # cover the head and the tail by the starting and the ending tokens
216
+ text_input_chunk[:, 0] = text_input[0, 0]
217
+ text_input_chunk[:, -1] = text_input[0, -1]
218
+ text_embedding = pipe.text_encoder(text_input_chunk)[0]
219
+
220
+ if no_boseos_middle:
221
+ if i == 0:
222
+ # discard the ending token
223
+ text_embedding = text_embedding[:, :-1]
224
+ elif i == max_embeddings_multiples - 1:
225
+ # discard the starting token
226
+ text_embedding = text_embedding[:, 1:]
227
+ else:
228
+ # discard both starting and ending tokens
229
+ text_embedding = text_embedding[:, 1:-1]
230
+
231
+ text_embeddings.append(text_embedding)
232
+ text_embeddings = torch.concat(text_embeddings, axis=1)
233
+ else:
234
+ text_embeddings = pipe.text_encoder(text_input)[0]
235
+ return text_embeddings
236
+
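+ # Illustrative note (not part of the original file): with CLIP's `model_max_length` of 77, a
+ # padded input of `2 + 3 * 75 = 227` tokens is processed as three overlapping 77-token windows,
+ #
+ #     chunk_i = text_input[:, i * 75 : i * 75 + 77]
+ #
+ # whose first and last positions are overwritten with the global starting and ending tokens
+ # before encoding; the per-chunk hidden states are then concatenated along the sequence axis
+ # (dropping the duplicated BOS/EOS tokens when `no_boseos_middle` is set).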
237
+
238
+ def get_weighted_text_embeddings(
239
+ pipe: DiffusionPipeline,
240
+ prompt: Union[str, List[str]],
241
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
242
+ max_embeddings_multiples: Optional[int] = 3,
243
+ no_boseos_middle: Optional[bool] = False,
244
+ skip_parsing: Optional[bool] = False,
245
+ skip_weighting: Optional[bool] = False,
246
+ ):
247
+ r"""
248
+ Prompts can be assigned with local weights using brackets. For example,
249
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
250
+ and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.
251
+
252
+ Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.
253
+
254
+ Args:
255
+ pipe (`DiffusionPipeline`):
256
+ Pipe to provide access to the tokenizer and the text encoder.
257
+ prompt (`str` or `List[str]`):
258
+ The prompt or prompts to guide the image generation.
259
+ uncond_prompt (`str` or `List[str]`):
260
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
261
+ is provided, the embeddings of prompt and uncond_prompt are concatenated.
262
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
263
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
264
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
265
+ If the length of the text tokens is a multiple of the capacity of the text encoder, whether to keep the starting and
266
+ ending tokens of each chunk in the middle.
267
+ skip_parsing (`bool`, *optional*, defaults to `False`):
268
+ Skip the parsing of brackets.
269
+ skip_weighting (`bool`, *optional*, defaults to `False`):
270
+ Skip the weighting. When the parsing is skipped, it is forced True.
271
+ """
272
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
273
+ if isinstance(prompt, str):
274
+ prompt = [prompt]
275
+
276
+ if not skip_parsing:
277
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
278
+ if uncond_prompt is not None:
279
+ if isinstance(uncond_prompt, str):
280
+ uncond_prompt = [uncond_prompt]
281
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
282
+ else:
283
+ prompt_tokens = [
284
+ token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
285
+ ]
286
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
287
+ if uncond_prompt is not None:
288
+ if isinstance(uncond_prompt, str):
289
+ uncond_prompt = [uncond_prompt]
290
+ uncond_tokens = [
291
+ token[1:-1]
292
+ for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
293
+ ]
294
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
295
+
296
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
297
+ max_length = max([len(token) for token in prompt_tokens])
298
+ if uncond_prompt is not None:
299
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
300
+
301
+ max_embeddings_multiples = min(
302
+ max_embeddings_multiples,
303
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
304
+ )
305
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
306
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
307
+
308
+ # pad the length of tokens and weights
309
+ bos = pipe.tokenizer.bos_token_id
310
+ eos = pipe.tokenizer.eos_token_id
311
+ pad = getattr(pipe.tokenizer, "pad_token_id", eos)
312
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
313
+ prompt_tokens,
314
+ prompt_weights,
315
+ max_length,
316
+ bos,
317
+ eos,
318
+ pad,
319
+ no_boseos_middle=no_boseos_middle,
320
+ chunk_length=pipe.tokenizer.model_max_length,
321
+ )
322
+ prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
323
+ if uncond_prompt is not None:
324
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
325
+ uncond_tokens,
326
+ uncond_weights,
327
+ max_length,
328
+ bos,
329
+ eos,
330
+ pad,
331
+ no_boseos_middle=no_boseos_middle,
332
+ chunk_length=pipe.tokenizer.model_max_length,
333
+ )
334
+ uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)
335
+
336
+ # get the embeddings
337
+ text_embeddings = get_unweighted_text_embeddings(
338
+ pipe,
339
+ prompt_tokens,
340
+ pipe.tokenizer.model_max_length,
341
+ no_boseos_middle=no_boseos_middle,
342
+ )
343
+ prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=text_embeddings.device)
344
+ if uncond_prompt is not None:
345
+ uncond_embeddings = get_unweighted_text_embeddings(
346
+ pipe,
347
+ uncond_tokens,
348
+ pipe.tokenizer.model_max_length,
349
+ no_boseos_middle=no_boseos_middle,
350
+ )
351
+ uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=uncond_embeddings.device)
352
+
353
+ # assign weights to the prompts and normalize in the sense of mean
354
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
355
+ if (not skip_parsing) and (not skip_weighting):
356
+ previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
357
+ text_embeddings *= prompt_weights.unsqueeze(-1)
358
+ current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
359
+ text_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
360
+ if uncond_prompt is not None:
361
+ previous_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
362
+ uncond_embeddings *= uncond_weights.unsqueeze(-1)
363
+ current_mean = uncond_embeddings.float().mean(axis=[-2, -1]).to(uncond_embeddings.dtype)
364
+ uncond_embeddings *= (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)
365
+
366
+ if uncond_prompt is not None:
367
+ return text_embeddings, uncond_embeddings
368
+ return text_embeddings, None
369
+
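+ # Usage sketch (illustrative only, not part of the original file; the `pipe` argument is an
+ # assumption for this example): "(red:1.3)" boosts the matching token weights to 1.3, "[blurry]"
+ # damps them by 1 / 1.1, and the weighted embeddings are rescaled so their mean matches the
+ # unweighted ones.
+ def _example_weighted_embeddings(pipe):
+     prompt_embeds, negative_embeds = get_weighted_text_embeddings(
+         pipe,
+         prompt="a (red:1.3) sports car, highly detailed",
+         uncond_prompt="[blurry], low quality",
+         max_embeddings_multiples=3,
+     )
+     return prompt_embeds, negative_embeds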
370
+
371
+ def preprocess_image(image, batch_size):
372
+ w, h = image.size
373
+ w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
374
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
375
+ image = np.array(image).astype(np.float32) / 255.0
376
+ image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
377
+ image = torch.from_numpy(image)
378
+ return 2.0 * image - 1.0
379
+
380
+
381
+ def preprocess_mask(mask, batch_size, scale_factor=8):
382
+ if not isinstance(mask, torch.FloatTensor):
383
+ mask = mask.convert("L")
384
+ w, h = mask.size
385
+ w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
386
+ mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
387
+ mask = np.array(mask).astype(np.float32) / 255.0
388
+ mask = np.tile(mask, (4, 1, 1))
389
+ mask = np.vstack([mask[None]] * batch_size)
390
+ mask = 1 - mask # repaint white, keep black
391
+ mask = torch.from_numpy(mask)
392
+ return mask
393
+
394
+ else:
395
+ valid_mask_channel_sizes = [1, 3]
396
+ # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W)
397
+ if mask.shape[3] in valid_mask_channel_sizes:
398
+ mask = mask.permute(0, 3, 1, 2)
399
+ elif mask.shape[1] not in valid_mask_channel_sizes:
400
+ raise ValueError(
401
+ f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension,"
402
+ f" but received mask of shape {tuple(mask.shape)}"
403
+ )
404
+ # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape
405
+ mask = mask.mean(dim=1, keepdim=True)
406
+ h, w = mask.shape[-2:]
407
+ h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8
408
+ mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor))
409
+ return mask
410
+
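+ # Illustrative usage sketch (not part of the original file): for a fully white 512x512 PIL mask
+ # and the default `scale_factor=8`, the result matches the latent resolution and is inverted,
+ # so white (repaint) regions become 0.0.
+ def _example_preprocess_mask():
+     white = PIL.Image.new("L", (512, 512), color=255)
+     mask = preprocess_mask(white, batch_size=1, scale_factor=8)
+     return mask.shape  # torch.Size([1, 4, 64, 64]); all zeros here because white means "repaint"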
411
+
412
+ class StableDiffusionLongPromptWeightingPipeline(
413
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
414
+ ):
415
+ r"""
416
+ Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
417
+ parsing weighting in the prompt.
418
+
419
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
420
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
421
+
422
+ Args:
423
+ vae ([`AutoencoderKL`]):
424
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
425
+ text_encoder ([`CLIPTextModel`]):
426
+ Frozen text-encoder. Stable Diffusion uses the text portion of
427
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
428
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
429
+ tokenizer (`CLIPTokenizer`):
430
+ Tokenizer of class
431
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
432
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
433
+ scheduler ([`SchedulerMixin`]):
434
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
435
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
436
+ safety_checker ([`StableDiffusionSafetyChecker`]):
437
+ Classification module that estimates whether generated images could be considered offensive or harmful.
438
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
439
+ feature_extractor ([`CLIPImageProcessor`]):
440
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
441
+ """
442
+
443
+ _optional_components = ["safety_checker", "feature_extractor"]
444
+
445
+ def __init__(
446
+ self,
447
+ vae: AutoencoderKL,
448
+ text_encoder: CLIPTextModel,
449
+ tokenizer: CLIPTokenizer,
450
+ unet: UNet2DConditionModel,
451
+ scheduler: KarrasDiffusionSchedulers,
452
+ safety_checker: StableDiffusionSafetyChecker,
453
+ feature_extractor: CLIPImageProcessor,
454
+ requires_safety_checker: bool = True,
455
+ ):
456
+ super().__init__()
457
+
458
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
459
+ deprecation_message = (
460
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
461
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
462
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
463
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
464
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
465
+ " file"
466
+ )
467
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
468
+ new_config = dict(scheduler.config)
469
+ new_config["steps_offset"] = 1
470
+ scheduler._internal_dict = FrozenDict(new_config)
471
+
472
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
473
+ deprecation_message = (
474
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
475
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
476
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
477
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
478
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
479
+ )
480
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
481
+ new_config = dict(scheduler.config)
482
+ new_config["clip_sample"] = False
483
+ scheduler._internal_dict = FrozenDict(new_config)
484
+
485
+ if safety_checker is None and requires_safety_checker:
486
+ logger.warning(
487
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
488
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
489
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
490
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
491
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
492
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
493
+ )
494
+
495
+ if safety_checker is not None and feature_extractor is None:
496
+ raise ValueError(
497
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
498
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
499
+ )
500
+
501
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
502
+ version.parse(unet.config._diffusers_version).base_version
503
+ ) < version.parse("0.9.0.dev0")
504
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
505
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
506
+ deprecation_message = (
507
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
508
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
509
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
510
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
511
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
512
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
513
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
514
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
515
+ " the `unet/config.json` file"
516
+ )
517
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
518
+ new_config = dict(unet.config)
519
+ new_config["sample_size"] = 64
520
+ unet._internal_dict = FrozenDict(new_config)
521
+ self.register_modules(
522
+ vae=vae,
523
+ text_encoder=text_encoder,
524
+ tokenizer=tokenizer,
525
+ unet=unet,
526
+ scheduler=scheduler,
527
+ safety_checker=safety_checker,
528
+ feature_extractor=feature_extractor,
529
+ )
530
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
531
+
532
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
533
+ self.register_to_config(
534
+ requires_safety_checker=requires_safety_checker,
535
+ )
536
+
537
+ def enable_vae_slicing(self):
538
+ r"""
539
+ Enable sliced VAE decoding.
540
+
541
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
542
+ steps. This is useful to save some memory and allow larger batch sizes.
543
+ """
544
+ self.vae.enable_slicing()
545
+
546
+ def disable_vae_slicing(self):
547
+ r"""
548
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
549
+ computing decoding in one step.
550
+ """
551
+ self.vae.disable_slicing()
552
+
553
+ def enable_vae_tiling(self):
554
+ r"""
555
+ Enable tiled VAE decoding.
556
+
557
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
558
+ several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
559
+ """
560
+ self.vae.enable_tiling()
561
+
562
+ def disable_vae_tiling(self):
563
+ r"""
564
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
565
+ computing decoding in one step.
566
+ """
567
+ self.vae.disable_tiling()
568
+
569
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_sequential_cpu_offload
570
+ def enable_sequential_cpu_offload(self, gpu_id=0):
571
+ r"""
572
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
573
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
574
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
575
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
576
+ `enable_model_cpu_offload`, but performance is lower.
577
+ """
578
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
579
+ from accelerate import cpu_offload
580
+ else:
581
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
582
+
583
+ device = torch.device(f"cuda:{gpu_id}")
584
+
585
+ if self.device.type != "cpu":
586
+ self.to("cpu", silence_dtype_warnings=True)
587
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
588
+
589
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
590
+ cpu_offload(cpu_offloaded_model, device)
591
+
592
+ if self.safety_checker is not None:
593
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
594
+
595
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
596
+ def enable_model_cpu_offload(self, gpu_id=0):
597
+ r"""
598
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
599
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
600
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
601
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
602
+ """
603
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
604
+ from accelerate import cpu_offload_with_hook
605
+ else:
606
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
607
+
608
+ device = torch.device(f"cuda:{gpu_id}")
609
+
610
+ if self.device.type != "cpu":
611
+ self.to("cpu", silence_dtype_warnings=True)
612
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
613
+
614
+ hook = None
615
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
616
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
617
+
618
+ if self.safety_checker is not None:
619
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
620
+
621
+ # We'll offload the last model manually.
622
+ self.final_offload_hook = hook
623
+
624
+ @property
625
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
626
+ def _execution_device(self):
627
+ r"""
628
+ Returns the device on which the pipeline's models will be executed. After calling
629
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
630
+ hooks.
631
+ """
632
+ if not hasattr(self.unet, "_hf_hook"):
633
+ return self.device
634
+ for module in self.unet.modules():
635
+ if (
636
+ hasattr(module, "_hf_hook")
637
+ and hasattr(module._hf_hook, "execution_device")
638
+ and module._hf_hook.execution_device is not None
639
+ ):
640
+ return torch.device(module._hf_hook.execution_device)
641
+ return self.device
642
+
643
+ def _encode_prompt(
644
+ self,
645
+ prompt,
646
+ device,
647
+ num_images_per_prompt,
648
+ do_classifier_free_guidance,
649
+ negative_prompt=None,
650
+ max_embeddings_multiples=3,
651
+ prompt_embeds: Optional[torch.FloatTensor] = None,
652
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
653
+ ):
654
+ r"""
655
+ Encodes the prompt into text encoder hidden states.
656
+
657
+ Args:
658
+ prompt (`str` or `list(int)`):
659
+ prompt to be encoded
660
+ device: (`torch.device`):
661
+ torch device
662
+ num_images_per_prompt (`int`):
663
+ number of images that should be generated per prompt
664
+ do_classifier_free_guidance (`bool`):
665
+ whether to use classifier free guidance or not
666
+ negative_prompt (`str` or `List[str]`):
667
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
668
+ if `guidance_scale` is less than `1`).
669
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
670
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
671
+ """
672
+ if prompt is not None and isinstance(prompt, str):
673
+ batch_size = 1
674
+ elif prompt is not None and isinstance(prompt, list):
675
+ batch_size = len(prompt)
676
+ else:
677
+ batch_size = prompt_embeds.shape[0]
678
+
679
+ if negative_prompt_embeds is None:
680
+ if negative_prompt is None:
681
+ negative_prompt = [""] * batch_size
682
+ elif isinstance(negative_prompt, str):
683
+ negative_prompt = [negative_prompt] * batch_size
684
+ if batch_size != len(negative_prompt):
685
+ raise ValueError(
686
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
687
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
688
+ " the batch size of `prompt`."
689
+ )
690
+ if prompt_embeds is None or negative_prompt_embeds is None:
691
+ if isinstance(self, TextualInversionLoaderMixin):
692
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
693
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
694
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, self.tokenizer)
695
+
696
+ prompt_embeds1, negative_prompt_embeds1 = get_weighted_text_embeddings(
697
+ pipe=self,
698
+ prompt=prompt,
699
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
700
+ max_embeddings_multiples=max_embeddings_multiples,
701
+ )
702
+ if prompt_embeds is None:
703
+ prompt_embeds = prompt_embeds1
704
+ if negative_prompt_embeds is None:
705
+ negative_prompt_embeds = negative_prompt_embeds1
706
+
707
+ bs_embed, seq_len, _ = prompt_embeds.shape
708
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
709
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
710
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
711
+
712
+ if do_classifier_free_guidance:
713
+ bs_embed, seq_len, _ = negative_prompt_embeds.shape
714
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
715
+ negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
716
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
717
+
718
+ return prompt_embeds
719
+
720
+ def check_inputs(
721
+ self,
722
+ prompt,
723
+ height,
724
+ width,
725
+ strength,
726
+ callback_steps,
727
+ negative_prompt=None,
728
+ prompt_embeds=None,
729
+ negative_prompt_embeds=None,
730
+ ):
731
+ if height % 8 != 0 or width % 8 != 0:
732
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
733
+
734
+ if strength < 0 or strength > 1:
735
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
736
+
737
+ if (callback_steps is None) or (
738
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
739
+ ):
740
+ raise ValueError(
741
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
742
+ f" {type(callback_steps)}."
743
+ )
744
+
745
+ if prompt is not None and prompt_embeds is not None:
746
+ raise ValueError(
747
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
748
+ " only forward one of the two."
749
+ )
750
+ elif prompt is None and prompt_embeds is None:
751
+ raise ValueError(
752
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
753
+ )
754
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
755
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
756
+
757
+ if negative_prompt is not None and negative_prompt_embeds is not None:
758
+ raise ValueError(
759
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
760
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
761
+ )
762
+
763
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
764
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
765
+ raise ValueError(
766
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
767
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
768
+ f" {negative_prompt_embeds.shape}."
769
+ )
770
+
771
+ def get_timesteps(self, num_inference_steps, strength, device, is_text2img):
772
+ if is_text2img:
773
+ return self.scheduler.timesteps.to(device), num_inference_steps
774
+ else:
775
+ # get the original timestep using init_timestep
776
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
777
+
778
+ t_start = max(num_inference_steps - init_timestep, 0)
779
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
780
+
781
+ return timesteps, num_inference_steps - t_start
782
+
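+ # Illustrative note (not part of the original file): for img2img with `num_inference_steps=50`
+ # and `strength=0.8`, `init_timestep = min(int(50 * 0.8), 50) = 40`, so denoising starts at
+ # `t_start = 10` and runs for the remaining 40 scheduler steps; text2img always uses the full
+ # schedule.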
783
+ def run_safety_checker(self, image, device, dtype):
784
+ if self.safety_checker is not None:
785
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
786
+ image, has_nsfw_concept = self.safety_checker(
787
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
788
+ )
789
+ else:
790
+ has_nsfw_concept = None
791
+ return image, has_nsfw_concept
792
+
793
+ def decode_latents(self, latents):
794
+ latents = 1 / self.vae.config.scaling_factor * latents
795
+ image = self.vae.decode(latents).sample
796
+ image = (image / 2 + 0.5).clamp(0, 1)
797
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
798
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
799
+ return image
800
+
801
+ def prepare_extra_step_kwargs(self, generator, eta):
802
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
803
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
804
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
805
+ # and should be between [0, 1]
806
+
807
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
808
+ extra_step_kwargs = {}
809
+ if accepts_eta:
810
+ extra_step_kwargs["eta"] = eta
811
+
812
+ # check if the scheduler accepts generator
813
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
814
+ if accepts_generator:
815
+ extra_step_kwargs["generator"] = generator
816
+ return extra_step_kwargs
817
+
818
+ def prepare_latents(
819
+ self,
820
+ image,
821
+ timestep,
822
+ num_images_per_prompt,
823
+ batch_size,
824
+ num_channels_latents,
825
+ height,
826
+ width,
827
+ dtype,
828
+ device,
829
+ generator,
830
+ latents=None,
831
+ ):
832
+ if image is None:
833
+ batch_size = batch_size * num_images_per_prompt
834
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
835
+ if isinstance(generator, list) and len(generator) != batch_size:
836
+ raise ValueError(
837
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
838
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
839
+ )
840
+
841
+ if latents is None:
842
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
843
+ else:
844
+ latents = latents.to(device)
845
+
846
+ # scale the initial noise by the standard deviation required by the scheduler
847
+ latents = latents * self.scheduler.init_noise_sigma
848
+ return latents, None, None
849
+ else:
850
+ image = image.to(device=self.device, dtype=dtype)
851
+ init_latent_dist = self.vae.encode(image).latent_dist
852
+ init_latents = init_latent_dist.sample(generator=generator)
853
+ init_latents = self.vae.config.scaling_factor * init_latents
854
+
855
+ # Expand init_latents for batch_size and num_images_per_prompt
856
+ init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0)
857
+ init_latents_orig = init_latents
858
+
859
+ # add noise to latents using the timesteps
860
+ noise = randn_tensor(init_latents.shape, generator=generator, device=self.device, dtype=dtype)
861
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
862
+ latents = init_latents
863
+ return latents, init_latents_orig, noise
864
+
865
+ @torch.no_grad()
866
+ def __call__(
867
+ self,
868
+ prompt: Union[str, List[str]],
869
+ negative_prompt: Optional[Union[str, List[str]]] = None,
870
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
871
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
872
+ height: int = 512,
873
+ width: int = 512,
874
+ num_inference_steps: int = 50,
875
+ guidance_scale: float = 7.5,
876
+ strength: float = 0.8,
877
+ num_images_per_prompt: Optional[int] = 1,
878
+ add_predicted_noise: Optional[bool] = False,
879
+ eta: float = 0.0,
880
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
881
+ latents: Optional[torch.FloatTensor] = None,
882
+ prompt_embeds: Optional[torch.FloatTensor] = None,
883
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
884
+ max_embeddings_multiples: Optional[int] = 3,
885
+ output_type: Optional[str] = "pil",
886
+ return_dict: bool = True,
887
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
888
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
889
+ callback_steps: int = 1,
890
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
891
+ ):
892
+ r"""
893
+ Function invoked when calling the pipeline for generation.
894
+
895
+ Args:
896
+ prompt (`str` or `List[str]`):
897
+ The prompt or prompts to guide the image generation.
898
+ negative_prompt (`str` or `List[str]`, *optional*):
899
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
900
+ if `guidance_scale` is less than `1`).
901
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
902
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
903
+ process.
904
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
905
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
906
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
907
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
908
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
909
+ height (`int`, *optional*, defaults to 512):
910
+ The height in pixels of the generated image.
911
+ width (`int`, *optional*, defaults to 512):
912
+ The width in pixels of the generated image.
913
+ num_inference_steps (`int`, *optional*, defaults to 50):
914
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
915
+ expense of slower inference.
916
+ guidance_scale (`float`, *optional*, defaults to 7.5):
917
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
918
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
919
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
920
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
921
+ usually at the expense of lower image quality.
922
+ strength (`float`, *optional*, defaults to 0.8):
923
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
924
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
925
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
926
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
927
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
928
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
929
+ The number of images to generate per prompt.
930
+ add_predicted_noise (`bool`, *optional*, defaults to False):
931
+ Use predicted noise instead of random noise when constructing noisy versions of the original image in
932
+ the reverse diffusion process
933
+ eta (`float`, *optional*, defaults to 0.0):
934
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
935
+ [`schedulers.DDIMScheduler`], will be ignored for others.
936
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
937
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
938
+ to make generation deterministic.
939
+ latents (`torch.FloatTensor`, *optional*):
940
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
941
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
942
+ tensor will be generated by sampling using the supplied random `generator`.
943
+ prompt_embeds (`torch.FloatTensor`, *optional*):
944
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
945
+ provided, text embeddings will be generated from `prompt` input argument.
946
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
947
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
948
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
949
+ argument.
950
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
951
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
952
+ output_type (`str`, *optional*, defaults to `"pil"`):
953
+ The output format of the generate image. Choose between
954
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
955
+ return_dict (`bool`, *optional*, defaults to `True`):
956
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
957
+ plain tuple.
958
+ callback (`Callable`, *optional*):
959
+ A function that will be called every `callback_steps` steps during inference. The function will be
960
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
961
+ is_cancelled_callback (`Callable`, *optional*):
962
+ A function that will be called every `callback_steps` steps during inference. If the function returns
963
+ `True`, the inference will be cancelled.
964
+ callback_steps (`int`, *optional*, defaults to 1):
965
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
966
+ called at every step.
967
+ cross_attention_kwargs (`dict`, *optional*):
968
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
969
+ `self.processor` in
970
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
971
+
972
+ Returns:
973
+ `None` if cancelled by `is_cancelled_callback`,
974
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
975
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
976
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
977
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
978
+ (nsfw) content, according to the `safety_checker`.
979
+ """
980
+ # 0. Default height and width to unet
981
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
982
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
983
+
984
+ # 1. Check inputs. Raise error if not correct
985
+ self.check_inputs(
986
+ prompt, height, width, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
987
+ )
988
+
989
+ # 2. Define call parameters
990
+ if prompt is not None and isinstance(prompt, str):
991
+ batch_size = 1
992
+ elif prompt is not None and isinstance(prompt, list):
993
+ batch_size = len(prompt)
994
+ else:
995
+ batch_size = prompt_embeds.shape[0]
996
+
997
+ device = self._execution_device
998
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
999
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1000
+ # corresponds to doing no classifier free guidance.
1001
+ do_classifier_free_guidance = guidance_scale > 1.0
1002
+
1003
+ # 3. Encode input prompt
1004
+ prompt_embeds = self._encode_prompt(
1005
+ prompt,
1006
+ device,
1007
+ num_images_per_prompt,
1008
+ do_classifier_free_guidance,
1009
+ negative_prompt,
1010
+ max_embeddings_multiples,
1011
+ prompt_embeds=prompt_embeds,
1012
+ negative_prompt_embeds=negative_prompt_embeds,
1013
+ )
1014
+ dtype = prompt_embeds.dtype
1015
+
1016
+ # 4. Preprocess image and mask
1017
+ if isinstance(image, PIL.Image.Image):
1018
+ image = preprocess_image(image, batch_size)
1019
+ if image is not None:
1020
+ image = image.to(device=self.device, dtype=dtype)
1021
+ if isinstance(mask_image, PIL.Image.Image):
1022
+ mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor)
1023
+ if mask_image is not None:
1024
+ mask = mask_image.to(device=self.device, dtype=dtype)
1025
+ mask = torch.cat([mask] * num_images_per_prompt)
1026
+ else:
1027
+ mask = None
1028
+
1029
+ # 5. set timesteps
1030
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1031
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device, image is None)
1032
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1033
+
1034
+ # 6. Prepare latent variables
1035
+ latents, init_latents_orig, noise = self.prepare_latents(
1036
+ image,
1037
+ latent_timestep,
1038
+ num_images_per_prompt,
1039
+ batch_size,
1040
+ self.unet.config.in_channels,
1041
+ height,
1042
+ width,
1043
+ dtype,
1044
+ device,
1045
+ generator,
1046
+ latents,
1047
+ )
1048
+
1049
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1050
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1051
+
1052
+ # 8. Denoising loop
1053
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1054
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1055
+ for i, t in enumerate(timesteps):
1056
+ # expand the latents if we are doing classifier free guidance
1057
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1058
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1059
+
1060
+ # predict the noise residual
1061
+ noise_pred = self.unet(
1062
+ latent_model_input,
1063
+ t,
1064
+ encoder_hidden_states=prompt_embeds,
1065
+ cross_attention_kwargs=cross_attention_kwargs,
1066
+ ).sample
1067
+
1068
+ # perform guidance
1069
+ if do_classifier_free_guidance:
1070
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1071
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1072
+
1073
+ # compute the previous noisy sample x_t -> x_t-1
1074
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1075
+
1076
+ if mask is not None:
1077
+ # masking
1078
+ if add_predicted_noise:
1079
+ init_latents_proper = self.scheduler.add_noise(
1080
+ init_latents_orig, noise_pred_uncond, torch.tensor([t])
1081
+ )
1082
+ else:
1083
+ init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
1084
+ latents = (init_latents_proper * mask) + (latents * (1 - mask))
1085
+
1086
+ # call the callback, if provided
1087
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1088
+ progress_bar.update()
1089
+ if i % callback_steps == 0:
1090
+ if callback is not None:
1091
+ step_idx = i // getattr(self.scheduler, "order", 1)
1092
+ callback(step_idx, t, latents)
1093
+ if is_cancelled_callback is not None and is_cancelled_callback():
1094
+ return None
1095
+
1096
+ if output_type == "latent":
1097
+ image = latents
1098
+ has_nsfw_concept = None
1099
+ elif output_type == "pil":
1100
+ # 9. Post-processing
1101
+ image = self.decode_latents(latents)
1102
+
1103
+ # 10. Run safety checker
1104
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1105
+
1106
+ # 11. Convert to PIL
1107
+ image = self.numpy_to_pil(image)
1108
+ else:
1109
+ # 9. Post-processing
1110
+ image = self.decode_latents(latents)
1111
+
1112
+ # 10. Run safety checker
1113
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1114
+
1115
+ # Offload last model to CPU
1116
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1117
+ self.final_offload_hook.offload()
1118
+
1119
+ if not return_dict:
1120
+ return image, has_nsfw_concept
1121
+
1122
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1123
+
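+ # Usage sketch (illustrative, not part of the original file; the checkpoint name below is an
+ # assumption for the example):
+ #
+ #     pipe = DiffusionPipeline.from_pretrained(
+ #         "runwayml/stable-diffusion-v1-5", custom_pipeline="lpw_stable_diffusion"
+ #     ).to("cuda")
+ #     image = pipe(
+ #         prompt="a (very beautiful:1.2) mountain lake, ((masterpiece))",
+ #         negative_prompt="[lowres], worst quality",
+ #         max_embeddings_multiples=3,
+ #     ).images[0]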
1124
+ def text2img(
1125
+ self,
1126
+ prompt: Union[str, List[str]],
1127
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1128
+ height: int = 512,
1129
+ width: int = 512,
1130
+ num_inference_steps: int = 50,
1131
+ guidance_scale: float = 7.5,
1132
+ num_images_per_prompt: Optional[int] = 1,
1133
+ eta: float = 0.0,
1134
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1135
+ latents: Optional[torch.FloatTensor] = None,
1136
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1137
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1138
+ max_embeddings_multiples: Optional[int] = 3,
1139
+ output_type: Optional[str] = "pil",
1140
+ return_dict: bool = True,
1141
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1142
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
1143
+ callback_steps: int = 1,
1144
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1145
+ ):
1146
+ r"""
1147
+ Function for text-to-image generation.
1148
+ Args:
1149
+ prompt (`str` or `List[str]`):
1150
+ The prompt or prompts to guide the image generation.
1151
+ negative_prompt (`str` or `List[str]`, *optional*):
1152
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1153
+ if `guidance_scale` is less than `1`).
1154
+ height (`int`, *optional*, defaults to 512):
1155
+ The height in pixels of the generated image.
1156
+ width (`int`, *optional*, defaults to 512):
1157
+ The width in pixels of the generated image.
1158
+ num_inference_steps (`int`, *optional*, defaults to 50):
1159
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1160
+ expense of slower inference.
1161
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1162
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1163
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1164
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1165
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1166
+ usually at the expense of lower image quality.
1167
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1168
+ The number of images to generate per prompt.
1169
+ eta (`float`, *optional*, defaults to 0.0):
1170
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1171
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1172
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1173
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1174
+ to make generation deterministic.
1175
+ latents (`torch.FloatTensor`, *optional*):
1176
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1177
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1178
+ tensor will be generated by sampling using the supplied random `generator`.
1179
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1180
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1181
+ provided, text embeddings will be generated from `prompt` input argument.
1182
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1183
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1184
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1185
+ argument.
1186
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1187
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1188
+ output_type (`str`, *optional*, defaults to `"pil"`):
1189
+ The output format of the generate image. Choose between
1190
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1191
+ return_dict (`bool`, *optional*, defaults to `True`):
1192
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1193
+ plain tuple.
1194
+ callback (`Callable`, *optional*):
1195
+ A function that will be called every `callback_steps` steps during inference. The function will be
1196
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1197
+ is_cancelled_callback (`Callable`, *optional*):
1198
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1199
+ `True`, the inference will be cancelled.
1200
+ callback_steps (`int`, *optional*, defaults to 1):
1201
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1202
+ called at every step.
1203
+ cross_attention_kwargs (`dict`, *optional*):
1204
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1205
+ `self.processor` in
1206
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1207
+
1208
+ Returns:
1209
+ `None` if cancelled by `is_cancelled_callback`,
1210
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1211
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1212
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1213
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1214
+ (nsfw) content, according to the `safety_checker`.
1215
+ """
1216
+ return self.__call__(
1217
+ prompt=prompt,
1218
+ negative_prompt=negative_prompt,
1219
+ height=height,
1220
+ width=width,
1221
+ num_inference_steps=num_inference_steps,
1222
+ guidance_scale=guidance_scale,
1223
+ num_images_per_prompt=num_images_per_prompt,
1224
+ eta=eta,
1225
+ generator=generator,
1226
+ latents=latents,
1227
+ prompt_embeds=prompt_embeds,
1228
+ negative_prompt_embeds=negative_prompt_embeds,
1229
+ max_embeddings_multiples=max_embeddings_multiples,
1230
+ output_type=output_type,
1231
+ return_dict=return_dict,
1232
+ callback=callback,
1233
+ is_cancelled_callback=is_cancelled_callback,
1234
+ callback_steps=callback_steps,
1235
+ cross_attention_kwargs=cross_attention_kwargs,
1236
+ )
1237
+
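+ # Illustrative usage sketch for the `text2img` wrapper above (assumes the pipeline is already
+ # loaded as `pipe`, e.g. via `DiffusionPipeline.from_pretrained(model_id, custom_pipeline="lpw_stable_diffusion")`;
+ # the model id and prompts are placeholders):
+ #
+ #     image = pipe.text2img(
+ #         "a (masterpiece:1.2) photo of a red panda, best quality",
+ #         negative_prompt="(low quality:1.4), blurry",
+ #         num_inference_steps=30,
+ #         guidance_scale=7.5,
+ #     ).images[0]
+ #     image.save("red_panda.png")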
1238
+ def img2img(
1239
+ self,
1240
+ image: Union[torch.FloatTensor, PIL.Image.Image],
1241
+ prompt: Union[str, List[str]],
1242
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1243
+ strength: float = 0.8,
1244
+ num_inference_steps: Optional[int] = 50,
1245
+ guidance_scale: Optional[float] = 7.5,
1246
+ num_images_per_prompt: Optional[int] = 1,
1247
+ eta: Optional[float] = 0.0,
1248
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1249
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1250
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1251
+ max_embeddings_multiples: Optional[int] = 3,
1252
+ output_type: Optional[str] = "pil",
1253
+ return_dict: bool = True,
1254
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1255
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
1256
+ callback_steps: int = 1,
1257
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1258
+ ):
1259
+ r"""
1260
+ Function for image-to-image generation.
1261
+ Args:
1262
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
1263
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1264
+ process.
1265
+ prompt (`str` or `List[str]`):
1266
+ The prompt or prompts to guide the image generation.
1267
+ negative_prompt (`str` or `List[str]`, *optional*):
1268
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1269
+ if `guidance_scale` is less than `1`).
1270
+ strength (`float`, *optional*, defaults to 0.8):
1271
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
1272
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
1273
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
1274
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
1275
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
1276
+ num_inference_steps (`int`, *optional*, defaults to 50):
1277
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1278
+ expense of slower inference. This parameter will be modulated by `strength`.
1279
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1280
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1281
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1282
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1283
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1284
+ usually at the expense of lower image quality.
1285
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1286
+ The number of images to generate per prompt.
1287
+ eta (`float`, *optional*, defaults to 0.0):
1288
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1289
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1290
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1291
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1292
+ to make generation deterministic.
1293
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1294
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1295
+ provided, text embeddings will be generated from `prompt` input argument.
1296
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1297
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1298
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1299
+ argument.
1300
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1301
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1302
+ output_type (`str`, *optional*, defaults to `"pil"`):
1303
+ The output format of the generated image. Choose between
1304
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1305
+ return_dict (`bool`, *optional*, defaults to `True`):
1306
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1307
+ plain tuple.
1308
+ callback (`Callable`, *optional*):
1309
+ A function that will be called every `callback_steps` steps during inference. The function will be
1310
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1311
+ is_cancelled_callback (`Callable`, *optional*):
1312
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1313
+ `True`, the inference will be cancelled.
1314
+ callback_steps (`int`, *optional*, defaults to 1):
1315
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1316
+ called at every step.
1317
+ cross_attention_kwargs (`dict`, *optional*):
1318
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1319
+ `self.processor` in
1320
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1321
+
1322
+ Returns:
1323
+ `None` if cancelled by `is_cancelled_callback`,
1324
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1325
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1326
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1327
+ (nsfw) content, according to the `safety_checker`.
1328
+ """
1329
+ return self.__call__(
1330
+ prompt=prompt,
1331
+ negative_prompt=negative_prompt,
1332
+ image=image,
1333
+ num_inference_steps=num_inference_steps,
1334
+ guidance_scale=guidance_scale,
1335
+ strength=strength,
1336
+ num_images_per_prompt=num_images_per_prompt,
1337
+ eta=eta,
1338
+ generator=generator,
1339
+ prompt_embeds=prompt_embeds,
1340
+ negative_prompt_embeds=negative_prompt_embeds,
1341
+ max_embeddings_multiples=max_embeddings_multiples,
1342
+ output_type=output_type,
1343
+ return_dict=return_dict,
1344
+ callback=callback,
1345
+ is_cancelled_callback=is_cancelled_callback,
1346
+ callback_steps=callback_steps,
1347
+ cross_attention_kwargs=cross_attention_kwargs,
1348
+ )
1349
+
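+ # Illustrative usage sketch for the `img2img` wrapper above (assumes a loaded pipeline `pipe`
+ # and a local input image; file names and prompts are placeholders):
+ #
+ #     init_image = PIL.Image.open("sketch.png").convert("RGB").resize((512, 512))
+ #     image = pipe.img2img(
+ #         image=init_image,
+ #         prompt="a (detailed:1.2) oil painting of a castle",
+ #         strength=0.75,
+ #         num_inference_steps=50,
+ #     ).images[0]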
1350
+ def inpaint(
1351
+ self,
1352
+ image: Union[torch.FloatTensor, PIL.Image.Image],
1353
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image],
1354
+ prompt: Union[str, List[str]],
1355
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1356
+ strength: float = 0.8,
1357
+ num_inference_steps: Optional[int] = 50,
1358
+ guidance_scale: Optional[float] = 7.5,
1359
+ num_images_per_prompt: Optional[int] = 1,
1360
+ add_predicted_noise: Optional[bool] = False,
1361
+ eta: Optional[float] = 0.0,
1362
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1363
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1364
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1365
+ max_embeddings_multiples: Optional[int] = 3,
1366
+ output_type: Optional[str] = "pil",
1367
+ return_dict: bool = True,
1368
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1369
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
1370
+ callback_steps: int = 1,
1371
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1372
+ ):
1373
+ r"""
1374
+ Function for image inpainting.
1375
+ Args:
1376
+ image (`torch.FloatTensor` or `PIL.Image.Image`):
1377
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1378
+ process. This is the image whose masked region will be inpainted.
1379
+ mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
1380
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1381
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1382
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1383
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1384
+ prompt (`str` or `List[str]`):
1385
+ The prompt or prompts to guide the image generation.
1386
+ negative_prompt (`str` or `List[str]`, *optional*):
1387
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1388
+ if `guidance_scale` is less than `1`).
1389
+ strength (`float`, *optional*, defaults to 0.8):
1390
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1391
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
1392
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1393
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1394
+ num_inference_steps (`int`, *optional*, defaults to 50):
1395
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1396
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1397
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1398
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1399
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1400
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1401
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1402
+ usually at the expense of lower image quality.
1403
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1404
+ The number of images to generate per prompt.
1405
+ add_predicted_noise (`bool`, *optional*, defaults to False):
1406
+ Use predicted noise instead of random noise when constructing noisy versions of the original image in
1407
+ the reverse diffusion process
1408
+ eta (`float`, *optional*, defaults to 0.0):
1409
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1410
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1411
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1412
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1413
+ to make generation deterministic.
1414
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1415
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1416
+ provided, text embeddings will be generated from `prompt` input argument.
1417
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1418
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1419
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1420
+ argument.
1421
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1422
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1423
+ output_type (`str`, *optional*, defaults to `"pil"`):
1424
+ The output format of the generated image. Choose between
1425
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1426
+ return_dict (`bool`, *optional*, defaults to `True`):
1427
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1428
+ plain tuple.
1429
+ callback (`Callable`, *optional*):
1430
+ A function that will be called every `callback_steps` steps during inference. The function will be
1431
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1432
+ is_cancelled_callback (`Callable`, *optional*):
1433
+ A function that will be called every `callback_steps` steps during inference. If the function returns
1434
+ `True`, the inference will be cancelled.
1435
+ callback_steps (`int`, *optional*, defaults to 1):
1436
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1437
+ called at every step.
1438
+ cross_attention_kwargs (`dict`, *optional*):
1439
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1440
+ `self.processor` in
1441
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1442
+
1443
+ Returns:
1444
+ `None` if cancelled by `is_cancelled_callback`,
1445
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1446
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1447
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1448
+ (nsfw) content, according to the `safety_checker`.
1449
+ """
1450
+ return self.__call__(
1451
+ prompt=prompt,
1452
+ negative_prompt=negative_prompt,
1453
+ image=image,
1454
+ mask_image=mask_image,
1455
+ num_inference_steps=num_inference_steps,
1456
+ guidance_scale=guidance_scale,
1457
+ strength=strength,
1458
+ num_images_per_prompt=num_images_per_prompt,
1459
+ add_predicted_noise=add_predicted_noise,
1460
+ eta=eta,
1461
+ generator=generator,
1462
+ prompt_embeds=prompt_embeds,
1463
+ negative_prompt_embeds=negative_prompt_embeds,
1464
+ max_embeddings_multiples=max_embeddings_multiples,
1465
+ output_type=output_type,
1466
+ return_dict=return_dict,
1467
+ callback=callback,
1468
+ is_cancelled_callback=is_cancelled_callback,
1469
+ callback_steps=callback_steps,
1470
+ cross_attention_kwargs=cross_attention_kwargs,
1471
+ )
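+ # Illustrative usage sketch for the `inpaint` wrapper above (assumes a loaded pipeline `pipe`,
+ # an input image and a white-on-black mask of the region to repaint; paths are placeholders):
+ #
+ #     init_image = PIL.Image.open("photo.png").convert("RGB").resize((512, 512))
+ #     mask = PIL.Image.open("mask.png").convert("L").resize((512, 512))
+ #     image = pipe.inpaint(
+ #         image=init_image,
+ #         mask_image=mask,
+ #         prompt="a (small:1.1) wooden bench",
+ #         strength=0.75,
+ #     ).images[0]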
v0.26.3/lpw_stable_diffusion_onnx.py ADDED
@@ -0,0 +1,1148 @@
+ import inspect
2
+ import re
3
+ from typing import Callable, List, Optional, Union
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from packaging import version
9
+ from transformers import CLIPImageProcessor, CLIPTokenizer
10
+
11
+ import diffusers
12
+ from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
13
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
14
+ from diffusers.utils import logging
15
+
16
+
17
+ try:
18
+ from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE
19
+ except ImportError:
20
+ ORT_TO_NP_TYPE = {
21
+ "tensor(bool)": np.bool_,
22
+ "tensor(int8)": np.int8,
23
+ "tensor(uint8)": np.uint8,
24
+ "tensor(int16)": np.int16,
25
+ "tensor(uint16)": np.uint16,
26
+ "tensor(int32)": np.int32,
27
+ "tensor(uint32)": np.uint32,
28
+ "tensor(int64)": np.int64,
29
+ "tensor(uint64)": np.uint64,
30
+ "tensor(float16)": np.float16,
31
+ "tensor(float)": np.float32,
32
+ "tensor(double)": np.float64,
33
+ }
34
+
35
+ try:
36
+ from diffusers.utils import PIL_INTERPOLATION
37
+ except ImportError:
38
+ if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
39
+ PIL_INTERPOLATION = {
40
+ "linear": PIL.Image.Resampling.BILINEAR,
41
+ "bilinear": PIL.Image.Resampling.BILINEAR,
42
+ "bicubic": PIL.Image.Resampling.BICUBIC,
43
+ "lanczos": PIL.Image.Resampling.LANCZOS,
44
+ "nearest": PIL.Image.Resampling.NEAREST,
45
+ }
46
+ else:
47
+ PIL_INTERPOLATION = {
48
+ "linear": PIL.Image.LINEAR,
49
+ "bilinear": PIL.Image.BILINEAR,
50
+ "bicubic": PIL.Image.BICUBIC,
51
+ "lanczos": PIL.Image.LANCZOS,
52
+ "nearest": PIL.Image.NEAREST,
53
+ }
54
+ # ------------------------------------------------------------------------------
55
+
56
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
57
+
58
+ re_attention = re.compile(
59
+ r"""
60
+ \\\(|
61
+ \\\)|
62
+ \\\[|
63
+ \\]|
64
+ \\\\|
65
+ \\|
66
+ \(|
67
+ \[|
68
+ :([+-]?[.\d]+)\)|
69
+ \)|
70
+ ]|
71
+ [^\\()\[\]:]+|
72
+ :
73
+ """,
74
+ re.X,
75
+ )
76
+
77
+
78
+ def parse_prompt_attention(text):
79
+ """
80
+ Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
81
+ Accepted tokens are:
82
+ (abc) - increases attention to abc by a multiplier of 1.1
83
+ (abc:3.12) - increases attention to abc by a multiplier of 3.12
84
+ [abc] - decreases attention to abc by a multiplier of 1.1
85
+ \\( - literal character '('
86
+ \\[ - literal character '['
87
+ \\) - literal character ')'
88
+ \\] - literal character ']'
89
+ \\ - literal character '\'
90
+ anything else - just text
91
+ >>> parse_prompt_attention('normal text')
92
+ [['normal text', 1.0]]
93
+ >>> parse_prompt_attention('an (important) word')
94
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
95
+ >>> parse_prompt_attention('(unbalanced')
96
+ [['unbalanced', 1.1]]
97
+ >>> parse_prompt_attention('\\(literal\\]')
98
+ [['(literal]', 1.0]]
99
+ >>> parse_prompt_attention('(unnecessary)(parens)')
100
+ [['unnecessaryparens', 1.1]]
101
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
102
+ [['a ', 1.0],
103
+ ['house', 1.5730000000000004],
104
+ [' ', 1.1],
105
+ ['on', 1.0],
106
+ [' a ', 1.1],
107
+ ['hill', 0.55],
108
+ [', sun, ', 1.1],
109
+ ['sky', 1.4641000000000006],
110
+ ['.', 1.1]]
111
+ """
112
+
113
+ res = []
114
+ round_brackets = []
115
+ square_brackets = []
116
+
117
+ round_bracket_multiplier = 1.1
118
+ square_bracket_multiplier = 1 / 1.1
119
+
120
+ def multiply_range(start_position, multiplier):
121
+ for p in range(start_position, len(res)):
122
+ res[p][1] *= multiplier
123
+
124
+ for m in re_attention.finditer(text):
125
+ text = m.group(0)
126
+ weight = m.group(1)
127
+
128
+ if text.startswith("\\"):
129
+ res.append([text[1:], 1.0])
130
+ elif text == "(":
131
+ round_brackets.append(len(res))
132
+ elif text == "[":
133
+ square_brackets.append(len(res))
134
+ elif weight is not None and len(round_brackets) > 0:
135
+ multiply_range(round_brackets.pop(), float(weight))
136
+ elif text == ")" and len(round_brackets) > 0:
137
+ multiply_range(round_brackets.pop(), round_bracket_multiplier)
138
+ elif text == "]" and len(square_brackets) > 0:
139
+ multiply_range(square_brackets.pop(), square_bracket_multiplier)
140
+ else:
141
+ res.append([text, 1.0])
142
+
143
+ for pos in round_brackets:
144
+ multiply_range(pos, round_bracket_multiplier)
145
+
146
+ for pos in square_brackets:
147
+ multiply_range(pos, square_bracket_multiplier)
148
+
149
+ if len(res) == 0:
150
+ res = [["", 1.0]]
151
+
152
+ # merge runs of identical weights
153
+ i = 0
154
+ while i + 1 < len(res):
155
+ if res[i][1] == res[i + 1][1]:
156
+ res[i][0] += res[i + 1][0]
157
+ res.pop(i + 1)
158
+ else:
159
+ i += 1
160
+
161
+ return res
162
+
163
+
164
+ def get_prompts_with_weights(pipe, prompt: List[str], max_length: int):
165
+ r"""
166
+ Tokenize a list of prompts and return its tokens with weights of each token.
167
+
168
+ No padding, starting or ending token is included.
169
+ """
170
+ tokens = []
171
+ weights = []
172
+ truncated = False
173
+ for text in prompt:
174
+ texts_and_weights = parse_prompt_attention(text)
175
+ text_token = []
176
+ text_weight = []
177
+ for word, weight in texts_and_weights:
178
+ # tokenize and discard the starting and the ending token
179
+ token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1]
180
+ text_token += list(token)
181
+ # repeat the word's weight once for each token it produced
182
+ text_weight += [weight] * len(token)
183
+ # stop if the text is too long (longer than truncation limit)
184
+ if len(text_token) > max_length:
185
+ truncated = True
186
+ break
187
+ # truncate
188
+ if len(text_token) > max_length:
189
+ truncated = True
190
+ text_token = text_token[:max_length]
191
+ text_weight = text_weight[:max_length]
192
+ tokens.append(text_token)
193
+ weights.append(text_weight)
194
+ if truncated:
195
+ logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
196
+ return tokens, weights
197
+
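+ # Sketch of what the helper above returns (the token ids below are made up; real values depend
+ # on the CLIP tokenizer):
+ #
+ #     tokens, weights = get_prompts_with_weights(pipe, ["a (red:1.3) ball"], max_length=75)
+ #     # tokens  -> [[320, 736, 1928]]   (ids without the starting/ending tokens)
+ #     # weights -> [[1.0, 1.3, 1.0]]    (the bracketed word carries its weight)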
198
+
199
+ def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77):
200
+ r"""
201
+ Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
202
+ """
203
+ max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
204
+ weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
205
+ for i in range(len(tokens)):
206
+ tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos]
207
+ if no_boseos_middle:
208
+ weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
209
+ else:
210
+ w = []
211
+ if len(weights[i]) == 0:
212
+ w = [1.0] * weights_length
213
+ else:
214
+ for j in range(max_embeddings_multiples):
215
+ w.append(1.0) # weight for starting token in this chunk
216
+ w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
217
+ w.append(1.0) # weight for ending token in this chunk
218
+ w += [1.0] * (weights_length - len(w))
219
+ weights[i] = w[:]
220
+
221
+ return tokens, weights
222
+
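+ # Worked example for the padding above (assuming chunk_length=77, max_length=77 and a prompt
+ # that produced 3 tokens): the padded ids become
+ #     [bos] + 3 prompt tokens + 72 pad tokens + [eos]   -> 77 ids in total
+ # and, with no_boseos_middle=True, the weights become
+ #     [1.0] + [w1, w2, w3] + [1.0] * 73                 -> 77 weights in total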
223
+
224
+ def get_unweighted_text_embeddings(
225
+ pipe,
226
+ text_input: np.array,
227
+ chunk_length: int,
228
+ no_boseos_middle: Optional[bool] = True,
229
+ ):
230
+ """
231
+ When the length of tokens is a multiple of the capacity of the text encoder,
232
+ it should be split into chunks and sent to the text encoder individually.
233
+ """
234
+ max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
235
+ if max_embeddings_multiples > 1:
236
+ text_embeddings = []
237
+ for i in range(max_embeddings_multiples):
238
+ # extract the i-th chunk
239
+ text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy()
240
+
241
+ # cover the head and the tail by the starting and the ending tokens
242
+ text_input_chunk[:, 0] = text_input[0, 0]
243
+ text_input_chunk[:, -1] = text_input[0, -1]
244
+
245
+ text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0]
246
+
247
+ if no_boseos_middle:
248
+ if i == 0:
249
+ # discard the ending token
250
+ text_embedding = text_embedding[:, :-1]
251
+ elif i == max_embeddings_multiples - 1:
252
+ # discard the starting token
253
+ text_embedding = text_embedding[:, 1:]
254
+ else:
255
+ # discard both starting and ending tokens
256
+ text_embedding = text_embedding[:, 1:-1]
257
+
258
+ text_embeddings.append(text_embedding)
259
+ text_embeddings = np.concatenate(text_embeddings, axis=1)
260
+ else:
261
+ text_embeddings = pipe.text_encoder(input_ids=text_input)[0]
262
+ return text_embeddings
263
+
264
+
265
+ def get_weighted_text_embeddings(
266
+ pipe,
267
+ prompt: Union[str, List[str]],
268
+ uncond_prompt: Optional[Union[str, List[str]]] = None,
269
+ max_embeddings_multiples: Optional[int] = 4,
270
+ no_boseos_middle: Optional[bool] = False,
271
+ skip_parsing: Optional[bool] = False,
272
+ skip_weighting: Optional[bool] = False,
273
+ **kwargs,
274
+ ):
275
+ r"""
276
+ Prompts can be assigned local weights using brackets. For example, the
+ prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
+ and the embedding tokens corresponding to those words get multiplied by a constant, 1.1.
+
+ Also, to regularize the embedding, the weighted embedding is scaled so that it preserves the mean of the original (unweighted) embedding.
281
+
282
+ Args:
283
+ pipe (`OnnxStableDiffusionPipeline`):
284
+ Pipe to provide access to the tokenizer and the text encoder.
285
+ prompt (`str` or `List[str]`):
286
+ The prompt or prompts to guide the image generation.
287
+ uncond_prompt (`str` or `List[str]`):
288
+ The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
+ is provided, the embeddings of both the prompt and uncond_prompt are returned.
290
+ max_embeddings_multiples (`int`, *optional*, defaults to `4`):
291
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
292
+ no_boseos_middle (`bool`, *optional*, defaults to `False`):
293
+ If the length of the text tokens is a multiple of the text encoder capacity, whether to keep the
+ starting and ending tokens in each of the chunks in the middle.
295
+ skip_parsing (`bool`, *optional*, defaults to `False`):
296
+ Skip the parsing of brackets.
297
+ skip_weighting (`bool`, *optional*, defaults to `False`):
298
+ Skip the weighting. When parsing is skipped, this is forced to `True`.
299
+ """
300
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
301
+ if isinstance(prompt, str):
302
+ prompt = [prompt]
303
+
304
+ if not skip_parsing:
305
+ prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
306
+ if uncond_prompt is not None:
307
+ if isinstance(uncond_prompt, str):
308
+ uncond_prompt = [uncond_prompt]
309
+ uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
310
+ else:
311
+ prompt_tokens = [
312
+ token[1:-1]
313
+ for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids
314
+ ]
315
+ prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
316
+ if uncond_prompt is not None:
317
+ if isinstance(uncond_prompt, str):
318
+ uncond_prompt = [uncond_prompt]
319
+ uncond_tokens = [
320
+ token[1:-1]
321
+ for token in pipe.tokenizer(
322
+ uncond_prompt,
323
+ max_length=max_length,
324
+ truncation=True,
325
+ return_tensors="np",
326
+ ).input_ids
327
+ ]
328
+ uncond_weights = [[1.0] * len(token) for token in uncond_tokens]
329
+
330
+ # round up the longest length of tokens to a multiple of (model_max_length - 2)
331
+ max_length = max([len(token) for token in prompt_tokens])
332
+ if uncond_prompt is not None:
333
+ max_length = max(max_length, max([len(token) for token in uncond_tokens]))
334
+
335
+ max_embeddings_multiples = min(
336
+ max_embeddings_multiples,
337
+ (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
338
+ )
339
+ max_embeddings_multiples = max(1, max_embeddings_multiples)
340
+ max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
341
+
342
+ # pad the length of tokens and weights
343
+ bos = pipe.tokenizer.bos_token_id
344
+ eos = pipe.tokenizer.eos_token_id
345
+ pad = getattr(pipe.tokenizer, "pad_token_id", eos)
346
+ prompt_tokens, prompt_weights = pad_tokens_and_weights(
347
+ prompt_tokens,
348
+ prompt_weights,
349
+ max_length,
350
+ bos,
351
+ eos,
352
+ pad,
353
+ no_boseos_middle=no_boseos_middle,
354
+ chunk_length=pipe.tokenizer.model_max_length,
355
+ )
356
+ prompt_tokens = np.array(prompt_tokens, dtype=np.int32)
357
+ if uncond_prompt is not None:
358
+ uncond_tokens, uncond_weights = pad_tokens_and_weights(
359
+ uncond_tokens,
360
+ uncond_weights,
361
+ max_length,
362
+ bos,
363
+ eos,
364
+ pad,
365
+ no_boseos_middle=no_boseos_middle,
366
+ chunk_length=pipe.tokenizer.model_max_length,
367
+ )
368
+ uncond_tokens = np.array(uncond_tokens, dtype=np.int32)
369
+
370
+ # get the embeddings
371
+ text_embeddings = get_unweighted_text_embeddings(
372
+ pipe,
373
+ prompt_tokens,
374
+ pipe.tokenizer.model_max_length,
375
+ no_boseos_middle=no_boseos_middle,
376
+ )
377
+ prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)
378
+ if uncond_prompt is not None:
379
+ uncond_embeddings = get_unweighted_text_embeddings(
380
+ pipe,
381
+ uncond_tokens,
382
+ pipe.tokenizer.model_max_length,
383
+ no_boseos_middle=no_boseos_middle,
384
+ )
385
+ uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype)
386
+
387
+ # assign weights to the prompts and normalize in the sense of mean
388
+ # TODO: should we normalize by chunk or in a whole (current implementation)?
389
+ if (not skip_parsing) and (not skip_weighting):
390
+ previous_mean = text_embeddings.mean(axis=(-2, -1))
391
+ text_embeddings *= prompt_weights[:, :, None]
392
+ text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]
393
+ if uncond_prompt is not None:
394
+ previous_mean = uncond_embeddings.mean(axis=(-2, -1))
395
+ uncond_embeddings *= uncond_weights[:, :, None]
396
+ uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None]
397
+
398
+ # For classifier-free guidance the caller needs both the conditional and unconditional embeddings.
+ # They are returned separately here and concatenated into a single batch in `_encode_prompt`,
+ # so the UNet only needs one (batched) forward pass per denoising step.
401
+ if uncond_prompt is not None:
402
+ return text_embeddings, uncond_embeddings
403
+
404
+ return text_embeddings
405
+
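+ # Illustrative usage sketch (assumes a loaded ONNX pipeline `pipe`; prompts are placeholders):
+ #
+ #     cond, uncond = get_weighted_text_embeddings(
+ #         pipe,
+ #         prompt="a (photorealistic:1.3) portrait, (sharp focus:1.1)",
+ #         uncond_prompt="(low quality:1.4), blurry",
+ #     )
+ #     # cond and uncond are numpy arrays of shape (batch, n_tokens, hidden_dim)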
406
+
407
+ def preprocess_image(image):
408
+ w, h = image.size
409
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
410
+ image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
411
+ image = np.array(image).astype(np.float32) / 255.0
412
+ image = image[None].transpose(0, 3, 1, 2)
413
+ return 2.0 * image - 1.0
414
+
415
+
416
+ def preprocess_mask(mask, scale_factor=8):
417
+ mask = mask.convert("L")
418
+ w, h = mask.size
419
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
420
+ mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
421
+ mask = np.array(mask).astype(np.float32) / 255.0
422
+ mask = np.tile(mask, (4, 1, 1))
423
+ mask = mask[None].transpose(0, 1, 2, 3)  # add a batch dimension; the identity transpose is a no-op kept for clarity
424
+ mask = 1 - mask # repaint white, keep black
425
+ return mask
426
+
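+ # Shape sketch for the two helpers above (example sizes, not taken from the original file):
+ #
+ #     img = PIL.Image.new("RGB", (513, 769))
+ #     preprocess_image(img).shape      # -> (1, 3, 768, 512), float32 scaled to [-1, 1]
+ #     m = PIL.Image.new("L", (513, 769))
+ #     preprocess_mask(m, 8).shape      # -> (1, 4, 96, 64); 1.0 marks regions that are kept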
427
+
428
+ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline):
429
+ r"""
430
+ Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
+ parsing weighting in the prompt.
432
+
433
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
434
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
435
+ """
436
+
437
+ if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):
438
+
439
+ def __init__(
440
+ self,
441
+ vae_encoder: OnnxRuntimeModel,
442
+ vae_decoder: OnnxRuntimeModel,
443
+ text_encoder: OnnxRuntimeModel,
444
+ tokenizer: CLIPTokenizer,
445
+ unet: OnnxRuntimeModel,
446
+ scheduler: SchedulerMixin,
447
+ safety_checker: OnnxRuntimeModel,
448
+ feature_extractor: CLIPImageProcessor,
449
+ requires_safety_checker: bool = True,
450
+ ):
451
+ super().__init__(
452
+ vae_encoder=vae_encoder,
453
+ vae_decoder=vae_decoder,
454
+ text_encoder=text_encoder,
455
+ tokenizer=tokenizer,
456
+ unet=unet,
457
+ scheduler=scheduler,
458
+ safety_checker=safety_checker,
459
+ feature_extractor=feature_extractor,
460
+ requires_safety_checker=requires_safety_checker,
461
+ )
462
+ self.__init__additional__()
463
+
464
+ else:
465
+
466
+ def __init__(
467
+ self,
468
+ vae_encoder: OnnxRuntimeModel,
469
+ vae_decoder: OnnxRuntimeModel,
470
+ text_encoder: OnnxRuntimeModel,
471
+ tokenizer: CLIPTokenizer,
472
+ unet: OnnxRuntimeModel,
473
+ scheduler: SchedulerMixin,
474
+ safety_checker: OnnxRuntimeModel,
475
+ feature_extractor: CLIPImageProcessor,
476
+ ):
477
+ super().__init__(
478
+ vae_encoder=vae_encoder,
479
+ vae_decoder=vae_decoder,
480
+ text_encoder=text_encoder,
481
+ tokenizer=tokenizer,
482
+ unet=unet,
483
+ scheduler=scheduler,
484
+ safety_checker=safety_checker,
485
+ feature_extractor=feature_extractor,
486
+ )
487
+ self.__init__additional__()
488
+
489
+ def __init__additional__(self):
490
+ self.unet.config.in_channels = 4
491
+ self.vae_scale_factor = 8
492
+
493
+ def _encode_prompt(
494
+ self,
495
+ prompt,
496
+ num_images_per_prompt,
497
+ do_classifier_free_guidance,
498
+ negative_prompt,
499
+ max_embeddings_multiples,
500
+ ):
501
+ r"""
502
+ Encodes the prompt into text encoder hidden states.
503
+
504
+ Args:
505
+ prompt (`str` or `list(int)`):
506
+ prompt to be encoded
507
+ num_images_per_prompt (`int`):
508
+ number of images that should be generated per prompt
509
+ do_classifier_free_guidance (`bool`):
510
+ whether to use classifier free guidance or not
511
+ negative_prompt (`str` or `List[str]`):
512
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
513
+ if `guidance_scale` is less than `1`).
514
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
515
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
516
+ """
517
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
518
+
519
+ if negative_prompt is None:
520
+ negative_prompt = [""] * batch_size
521
+ elif isinstance(negative_prompt, str):
522
+ negative_prompt = [negative_prompt] * batch_size
523
+ if batch_size != len(negative_prompt):
524
+ raise ValueError(
525
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
526
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
527
+ " the batch size of `prompt`."
528
+ )
529
+
530
+ text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
531
+ pipe=self,
532
+ prompt=prompt,
533
+ uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
534
+ max_embeddings_multiples=max_embeddings_multiples,
535
+ )
536
+
537
+ text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0)
538
+ if do_classifier_free_guidance:
539
+ uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0)
540
+ text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
541
+
542
+ return text_embeddings
543
+
544
+ def check_inputs(self, prompt, height, width, strength, callback_steps):
545
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
546
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
547
+
548
+ if strength < 0 or strength > 1:
549
+ raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
550
+
551
+ if height % 8 != 0 or width % 8 != 0:
552
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
553
+
554
+ if (callback_steps is None) or (
555
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
556
+ ):
557
+ raise ValueError(
558
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
559
+ f" {type(callback_steps)}."
560
+ )
561
+
562
+ def get_timesteps(self, num_inference_steps, strength, is_text2img):
563
+ if is_text2img:
564
+ return self.scheduler.timesteps, num_inference_steps
565
+ else:
566
+ # get the original timestep using init_timestep
567
+ offset = self.scheduler.config.get("steps_offset", 0)
568
+ init_timestep = int(num_inference_steps * strength) + offset
569
+ init_timestep = min(init_timestep, num_inference_steps)
570
+
571
+ t_start = max(num_inference_steps - init_timestep + offset, 0)
572
+ timesteps = self.scheduler.timesteps[t_start:]
573
+ return timesteps, num_inference_steps - t_start
574
+
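+ # Worked example for the img2img branch above (assuming the scheduler's steps_offset is 0):
+ # with num_inference_steps=50 and strength=0.8, init_timestep = int(50 * 0.8) = 40 and
+ # t_start = 50 - 40 = 10, so the first 10 timesteps are skipped and 40 denoising steps run;
+ # strength=1.0 keeps all 50 steps, which is why a strength of 1 effectively ignores `image`.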
575
+ def run_safety_checker(self, image):
576
+ if self.safety_checker is not None:
577
+ safety_checker_input = self.feature_extractor(
578
+ self.numpy_to_pil(image), return_tensors="np"
579
+ ).pixel_values.astype(image.dtype)
580
+ # calling the safety_checker directly with a batch size > 1 raises an error, so run it one image at a time
581
+ images, has_nsfw_concept = [], []
582
+ for i in range(image.shape[0]):
583
+ image_i, has_nsfw_concept_i = self.safety_checker(
584
+ clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
585
+ )
586
+ images.append(image_i)
587
+ has_nsfw_concept.append(has_nsfw_concept_i[0])
588
+ image = np.concatenate(images)
589
+ else:
590
+ has_nsfw_concept = None
591
+ return image, has_nsfw_concept
592
+
593
+ def decode_latents(self, latents):
594
+ latents = 1 / 0.18215 * latents
595
+ # image = self.vae_decoder(latent_sample=latents)[0]
596
+ # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1
597
+ image = np.concatenate(
598
+ [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
599
+ )
600
+ image = np.clip(image / 2 + 0.5, 0, 1)
601
+ image = image.transpose((0, 2, 3, 1))
602
+ return image
603
+
604
+ def prepare_extra_step_kwargs(self, generator, eta):
605
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
606
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
607
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
608
+ # and should be between [0, 1]
609
+
610
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
611
+ extra_step_kwargs = {}
612
+ if accepts_eta:
613
+ extra_step_kwargs["eta"] = eta
614
+
615
+ # check if the scheduler accepts generator
616
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
617
+ if accepts_generator:
618
+ extra_step_kwargs["generator"] = generator
619
+ return extra_step_kwargs
620
+
621
+ def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None):
622
+ if image is None:
623
+ shape = (
624
+ batch_size,
625
+ self.unet.config.in_channels,
626
+ height // self.vae_scale_factor,
627
+ width // self.vae_scale_factor,
628
+ )
629
+
630
+ if latents is None:
631
+ latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
632
+ else:
633
+ if latents.shape != shape:
634
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
635
+
636
+ # scale the initial noise by the standard deviation required by the scheduler
637
+ latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy()
638
+ return latents, None, None
639
+ else:
640
+ init_latents = self.vae_encoder(sample=image)[0]
641
+ init_latents = 0.18215 * init_latents
642
+ init_latents = np.concatenate([init_latents] * batch_size, axis=0)
643
+ init_latents_orig = init_latents
644
+ shape = init_latents.shape
645
+
646
+ # add noise to latents using the timesteps
647
+ noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
648
+ latents = self.scheduler.add_noise(
649
+ torch.from_numpy(init_latents), torch.from_numpy(noise), timestep
650
+ ).numpy()
651
+ return latents, init_latents_orig, noise
652
+
653
+ @torch.no_grad()
654
+ def __call__(
655
+ self,
656
+ prompt: Union[str, List[str]],
657
+ negative_prompt: Optional[Union[str, List[str]]] = None,
658
+ image: Union[np.ndarray, PIL.Image.Image] = None,
659
+ mask_image: Union[np.ndarray, PIL.Image.Image] = None,
660
+ height: int = 512,
661
+ width: int = 512,
662
+ num_inference_steps: int = 50,
663
+ guidance_scale: float = 7.5,
664
+ strength: float = 0.8,
665
+ num_images_per_prompt: Optional[int] = 1,
666
+ eta: float = 0.0,
667
+ generator: Optional[torch.Generator] = None,
668
+ latents: Optional[np.ndarray] = None,
669
+ max_embeddings_multiples: Optional[int] = 3,
670
+ output_type: Optional[str] = "pil",
671
+ return_dict: bool = True,
672
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
673
+ is_cancelled_callback: Optional[Callable[[], bool]] = None,
674
+ callback_steps: int = 1,
675
+ **kwargs,
676
+ ):
677
+ r"""
678
+ Function invoked when calling the pipeline for generation.
679
+
680
+ Args:
681
+ prompt (`str` or `List[str]`):
682
+ The prompt or prompts to guide the image generation.
683
+ negative_prompt (`str` or `List[str]`, *optional*):
684
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
685
+ if `guidance_scale` is less than `1`).
686
+ image (`np.ndarray` or `PIL.Image.Image`):
687
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
688
+ process.
689
+ mask_image (`np.ndarray` or `PIL.Image.Image`):
690
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
691
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
692
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
693
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
694
+ height (`int`, *optional*, defaults to 512):
695
+ The height in pixels of the generated image.
696
+ width (`int`, *optional*, defaults to 512):
697
+ The width in pixels of the generated image.
698
+ num_inference_steps (`int`, *optional*, defaults to 50):
699
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
700
+ expense of slower inference.
701
+ guidance_scale (`float`, *optional*, defaults to 7.5):
702
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
703
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
704
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
705
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
706
+ usually at the expense of lower image quality.
707
+ strength (`float`, *optional*, defaults to 0.8):
708
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
709
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
710
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
711
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
712
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
713
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
714
+ The number of images to generate per prompt.
715
+ eta (`float`, *optional*, defaults to 0.0):
716
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
717
+ [`schedulers.DDIMScheduler`], will be ignored for others.
718
+ generator (`torch.Generator`, *optional*):
719
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
720
+ deterministic.
721
+ latents (`np.ndarray`, *optional*):
722
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
723
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
724
+ tensor will be generated by sampling using the supplied random `generator`.
725
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
726
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
727
+ output_type (`str`, *optional*, defaults to `"pil"`):
728
+ The output format of the generated image. Choose between
729
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
730
+ return_dict (`bool`, *optional*, defaults to `True`):
731
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
732
+ plain tuple.
733
+ callback (`Callable`, *optional*):
734
+ A function that will be called every `callback_steps` steps during inference. The function will be
735
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
736
+ is_cancelled_callback (`Callable`, *optional*):
737
+ A function that will be called every `callback_steps` steps during inference. If the function returns
738
+ `True`, the inference will be cancelled.
739
+ callback_steps (`int`, *optional*, defaults to 1):
740
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
741
+ called at every step.
742
+
743
+ Returns:
744
+ `None` if cancelled by `is_cancelled_callback`,
745
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
746
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
747
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
748
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
749
+ (nsfw) content, according to the `safety_checker`.
750
+ """
751
+ # 0. Default height and width to unet
752
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
753
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
754
+
755
+ # 1. Check inputs. Raise error if not correct
756
+ self.check_inputs(prompt, height, width, strength, callback_steps)
757
+
758
+ # 2. Define call parameters
759
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
760
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
761
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
762
+ # corresponds to doing no classifier free guidance.
763
+ do_classifier_free_guidance = guidance_scale > 1.0
764
+
765
+ # 3. Encode input prompt
766
+ text_embeddings = self._encode_prompt(
767
+ prompt,
768
+ num_images_per_prompt,
769
+ do_classifier_free_guidance,
770
+ negative_prompt,
771
+ max_embeddings_multiples,
772
+ )
773
+ dtype = text_embeddings.dtype
774
+
775
+ # 4. Preprocess image and mask
776
+ if isinstance(image, PIL.Image.Image):
777
+ image = preprocess_image(image)
778
+ if image is not None:
779
+ image = image.astype(dtype)
780
+ if isinstance(mask_image, PIL.Image.Image):
781
+ mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
782
+ if mask_image is not None:
783
+ mask = mask_image.astype(dtype)
784
+ mask = np.concatenate([mask] * batch_size * num_images_per_prompt)
785
+ else:
786
+ mask = None
787
+
788
+ # 5. set timesteps
789
+ self.scheduler.set_timesteps(num_inference_steps)
790
+ timestep_dtype = next(
791
+ (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
792
+ )
793
+ timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
794
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None)
795
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
796
+
797
+ # 6. Prepare latent variables
798
+ latents, init_latents_orig, noise = self.prepare_latents(
799
+ image,
800
+ latent_timestep,
801
+ batch_size * num_images_per_prompt,
802
+ height,
803
+ width,
804
+ dtype,
805
+ generator,
806
+ latents,
807
+ )
808
+
809
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
810
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
811
+
812
+ # 8. Denoising loop
813
+ for i, t in enumerate(self.progress_bar(timesteps)):
814
+ # expand the latents if we are doing classifier free guidance
815
+ latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
816
+ latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
817
+ latent_model_input = latent_model_input.numpy()
818
+
819
+ # predict the noise residual
820
+ noise_pred = self.unet(
821
+ sample=latent_model_input,
822
+ timestep=np.array([t], dtype=timestep_dtype),
823
+ encoder_hidden_states=text_embeddings,
824
+ )
825
+ noise_pred = noise_pred[0]
826
+
827
+ # perform guidance
828
+ if do_classifier_free_guidance:
829
+ noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
830
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
831
+
832
+ # compute the previous noisy sample x_t -> x_t-1
833
+ scheduler_output = self.scheduler.step(
834
+ torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
835
+ )
836
+ latents = scheduler_output.prev_sample.numpy()
837
+
838
+ if mask is not None:
839
+ # masking
840
+ init_latents_proper = self.scheduler.add_noise(
841
+ torch.from_numpy(init_latents_orig),
842
+ torch.from_numpy(noise),
843
+ t,
844
+ ).numpy()
845
+ latents = (init_latents_proper * mask) + (latents * (1 - mask))
846
+
847
+ # call the callback, if provided
848
+ if i % callback_steps == 0:
849
+ if callback is not None:
850
+ step_idx = i // getattr(self.scheduler, "order", 1)
851
+ callback(step_idx, t, latents)
852
+ if is_cancelled_callback is not None and is_cancelled_callback():
853
+ return None
854
+
855
+ # 9. Post-processing
856
+ image = self.decode_latents(latents)
857
+
858
+ # 10. Run safety checker
859
+ image, has_nsfw_concept = self.run_safety_checker(image)
860
+
861
+ # 11. Convert to PIL
862
+ if output_type == "pil":
863
+ image = self.numpy_to_pil(image)
864
+
865
+ if not return_dict:
866
+ return image, has_nsfw_concept
867
+
868
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
869
+
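+ # Illustrative usage sketch (model path, execution provider and prompt are placeholders; assumes
+ # an ONNX-exported Stable Diffusion checkpoint and onnxruntime installed):
+ #
+ #     pipe = diffusers.DiffusionPipeline.from_pretrained(
+ #         "path/to/onnx-stable-diffusion",
+ #         custom_pipeline="lpw_stable_diffusion_onnx",
+ #         provider="CPUExecutionProvider",
+ #     )
+ #     image = pipe("a (beautiful:1.3) landscape, (masterpiece:1.2)", guidance_scale=7.5).images[0]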
870
+ def text2img(
871
+ self,
872
+ prompt: Union[str, List[str]],
873
+ negative_prompt: Optional[Union[str, List[str]]] = None,
874
+ height: int = 512,
875
+ width: int = 512,
876
+ num_inference_steps: int = 50,
877
+ guidance_scale: float = 7.5,
878
+ num_images_per_prompt: Optional[int] = 1,
879
+ eta: float = 0.0,
880
+ generator: Optional[torch.Generator] = None,
881
+ latents: Optional[np.ndarray] = None,
882
+ max_embeddings_multiples: Optional[int] = 3,
883
+ output_type: Optional[str] = "pil",
884
+ return_dict: bool = True,
885
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
886
+ callback_steps: int = 1,
887
+ **kwargs,
888
+ ):
889
+ r"""
890
+ Function for text-to-image generation.
891
+ Args:
892
+ prompt (`str` or `List[str]`):
893
+ The prompt or prompts to guide the image generation.
894
+ negative_prompt (`str` or `List[str]`, *optional*):
895
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
896
+ if `guidance_scale` is less than `1`).
897
+ height (`int`, *optional*, defaults to 512):
898
+ The height in pixels of the generated image.
899
+ width (`int`, *optional*, defaults to 512):
900
+ The width in pixels of the generated image.
901
+ num_inference_steps (`int`, *optional*, defaults to 50):
902
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
903
+ expense of slower inference.
904
+ guidance_scale (`float`, *optional*, defaults to 7.5):
905
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
906
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
907
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
908
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
909
+ usually at the expense of lower image quality.
910
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
911
+ The number of images to generate per prompt.
912
+ eta (`float`, *optional*, defaults to 0.0):
913
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
914
+ [`schedulers.DDIMScheduler`], will be ignored for others.
915
+ generator (`torch.Generator`, *optional*):
916
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
917
+ deterministic.
918
+ latents (`np.ndarray`, *optional*):
919
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
920
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
921
+ tensor will be generated by sampling using the supplied random `generator`.
922
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
923
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
924
+ output_type (`str`, *optional*, defaults to `"pil"`):
925
+ The output format of the generate image. Choose between
926
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
927
+ return_dict (`bool`, *optional*, defaults to `True`):
928
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
929
+ plain tuple.
930
+ callback (`Callable`, *optional*):
931
+ A function that will be called every `callback_steps` steps during inference. The function will be
932
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
933
+ callback_steps (`int`, *optional*, defaults to 1):
934
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
935
+ called at every step.
936
+ Returns:
937
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
938
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
939
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
940
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
941
+ (nsfw) content, according to the `safety_checker`.
942
+ """
943
+ return self.__call__(
944
+ prompt=prompt,
945
+ negative_prompt=negative_prompt,
946
+ height=height,
947
+ width=width,
948
+ num_inference_steps=num_inference_steps,
949
+ guidance_scale=guidance_scale,
950
+ num_images_per_prompt=num_images_per_prompt,
951
+ eta=eta,
952
+ generator=generator,
953
+ latents=latents,
954
+ max_embeddings_multiples=max_embeddings_multiples,
955
+ output_type=output_type,
956
+ return_dict=return_dict,
957
+ callback=callback,
958
+ callback_steps=callback_steps,
959
+ **kwargs,
960
+ )
961
+
962
+ def img2img(
963
+ self,
964
+ image: Union[np.ndarray, PIL.Image.Image],
965
+ prompt: Union[str, List[str]],
966
+ negative_prompt: Optional[Union[str, List[str]]] = None,
967
+ strength: float = 0.8,
968
+ num_inference_steps: Optional[int] = 50,
969
+ guidance_scale: Optional[float] = 7.5,
970
+ num_images_per_prompt: Optional[int] = 1,
971
+ eta: Optional[float] = 0.0,
972
+ generator: Optional[torch.Generator] = None,
973
+ max_embeddings_multiples: Optional[int] = 3,
974
+ output_type: Optional[str] = "pil",
975
+ return_dict: bool = True,
976
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
977
+ callback_steps: int = 1,
978
+ **kwargs,
979
+ ):
980
+ r"""
981
+ Function for image-to-image generation.
982
+ Args:
983
+ image (`np.ndarray` or `PIL.Image.Image`):
984
+ `Image`, or ndarray representing an image batch, that will be used as the starting point for the
985
+ process.
986
+ prompt (`str` or `List[str]`):
987
+ The prompt or prompts to guide the image generation.
988
+ negative_prompt (`str` or `List[str]`, *optional*):
989
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
990
+ if `guidance_scale` is less than `1`).
991
+ strength (`float`, *optional*, defaults to 0.8):
992
+ Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
993
+ `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
994
+ number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
995
+ noise will be maximum and the denoising process will run for the full number of iterations specified in
996
+ `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
997
+ num_inference_steps (`int`, *optional*, defaults to 50):
998
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
999
+ expense of slower inference. This parameter will be modulated by `strength`.
1000
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1001
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1002
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1003
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1004
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1005
+ usually at the expense of lower image quality.
1006
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1007
+ The number of images to generate per prompt.
1008
+ eta (`float`, *optional*, defaults to 0.0):
1009
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1010
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1011
+ generator (`torch.Generator`, *optional*):
1012
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1013
+ deterministic.
1014
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1015
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1016
+ output_type (`str`, *optional*, defaults to `"pil"`):
1017
+ The output format of the generate image. Choose between
1018
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1019
+ return_dict (`bool`, *optional*, defaults to `True`):
1020
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1021
+ plain tuple.
1022
+ callback (`Callable`, *optional*):
1023
+ A function that will be called every `callback_steps` steps during inference. The function will be
1024
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1025
+ callback_steps (`int`, *optional*, defaults to 1):
1026
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1027
+ called at every step.
1028
+ Returns:
1029
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1030
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1031
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1032
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1033
+ (nsfw) content, according to the `safety_checker`.
1034
+ """
1035
+ return self.__call__(
1036
+ prompt=prompt,
1037
+ negative_prompt=negative_prompt,
1038
+ image=image,
1039
+ num_inference_steps=num_inference_steps,
1040
+ guidance_scale=guidance_scale,
1041
+ strength=strength,
1042
+ num_images_per_prompt=num_images_per_prompt,
1043
+ eta=eta,
1044
+ generator=generator,
1045
+ max_embeddings_multiples=max_embeddings_multiples,
1046
+ output_type=output_type,
1047
+ return_dict=return_dict,
1048
+ callback=callback,
1049
+ callback_steps=callback_steps,
1050
+ **kwargs,
1051
+ )
1052
+
1053
+ def inpaint(
1054
+ self,
1055
+ image: Union[np.ndarray, PIL.Image.Image],
1056
+ mask_image: Union[np.ndarray, PIL.Image.Image],
1057
+ prompt: Union[str, List[str]],
1058
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1059
+ strength: float = 0.8,
1060
+ num_inference_steps: Optional[int] = 50,
1061
+ guidance_scale: Optional[float] = 7.5,
1062
+ num_images_per_prompt: Optional[int] = 1,
1063
+ eta: Optional[float] = 0.0,
1064
+ generator: Optional[torch.Generator] = None,
1065
+ max_embeddings_multiples: Optional[int] = 3,
1066
+ output_type: Optional[str] = "pil",
1067
+ return_dict: bool = True,
1068
+ callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
1069
+ callback_steps: int = 1,
1070
+ **kwargs,
1071
+ ):
1072
+ r"""
1073
+ Function for inpainting.
1074
+ Args:
1075
+ image (`np.ndarray` or `PIL.Image.Image`):
1076
+ `Image`, or tensor representing an image batch, that will be used as the starting point for the
1077
+ process. This is the image whose masked region will be inpainted.
1078
+ mask_image (`np.ndarray` or `PIL.Image.Image`):
1079
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1080
+ replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
1081
+ PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
1082
+ contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
1083
+ prompt (`str` or `List[str]`):
1084
+ The prompt or prompts to guide the image generation.
1085
+ negative_prompt (`str` or `List[str]`, *optional*):
1086
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
1087
+ if `guidance_scale` is less than `1`).
1088
+ strength (`float`, *optional*, defaults to 0.8):
1089
+ Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
1090
+ is 1, the denoising process will be run on the masked area for the full number of iterations specified
1091
+ in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
1092
+ noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
1093
+ num_inference_steps (`int`, *optional*, defaults to 50):
1094
+ The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
1095
+ the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
1096
+ guidance_scale (`float`, *optional*, defaults to 7.5):
1097
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1098
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1099
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1100
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1101
+ usually at the expense of lower image quality.
1102
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1103
+ The number of images to generate per prompt.
1104
+ eta (`float`, *optional*, defaults to 0.0):
1105
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1106
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1107
+ generator (`torch.Generator`, *optional*):
1108
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
1109
+ deterministic.
1110
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
1111
+ The max multiple length of prompt embeddings compared to the max output length of text encoder.
1112
+ output_type (`str`, *optional*, defaults to `"pil"`):
1113
+ The output format of the generate image. Choose between
1114
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1115
+ return_dict (`bool`, *optional*, defaults to `True`):
1116
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
1117
+ plain tuple.
1118
+ callback (`Callable`, *optional*):
1119
+ A function that will be called every `callback_steps` steps during inference. The function will be
1120
+ called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
1121
+ callback_steps (`int`, *optional*, defaults to 1):
1122
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1123
+ called at every step.
1124
+ Returns:
1125
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
1126
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
1127
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
1128
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
1129
+ (nsfw) content, according to the `safety_checker`.
1130
+ """
1131
+ return self.__call__(
1132
+ prompt=prompt,
1133
+ negative_prompt=negative_prompt,
1134
+ image=image,
1135
+ mask_image=mask_image,
1136
+ num_inference_steps=num_inference_steps,
1137
+ guidance_scale=guidance_scale,
1138
+ strength=strength,
1139
+ num_images_per_prompt=num_images_per_prompt,
1140
+ eta=eta,
1141
+ generator=generator,
1142
+ max_embeddings_multiples=max_embeddings_multiples,
1143
+ output_type=output_type,
1144
+ return_dict=return_dict,
1145
+ callback=callback,
1146
+ callback_steps=callback_steps,
1147
+ **kwargs,
1148
+ )
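A minimal usage sketch for the `text2img` / `img2img` / `inpaint` helpers above (not part of the pipeline source; the checkpoint id, ONNX revision, and execution provider are assumptions, while the call arguments mirror the signatures in the diff):

# Hedged sketch: load this file as the "lpw_stable_diffusion_onnx" community pipeline.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",            # assumed checkpoint with an ONNX export
    revision="onnx",                             # assumed ONNX revision
    custom_pipeline="lpw_stable_diffusion_onnx", # assumed community pipeline name
    provider="CPUExecutionProvider",             # assumed onnxruntime execution provider
)

# text2img wraps __call__ with text-to-image defaults (see the method above).
result = pipe.text2img(
    prompt="a photo of an astronaut riding a horse, highly detailed",
    negative_prompt="lowres, blurry",
    num_inference_steps=30,
    guidance_scale=7.5,
)
result.images[0].save("astronaut.png")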
v0.26.3/lpw_stable_diffusion_xl.py ADDED
The diff for this file is too large to render. See raw diff
 
v0.26.3/magic_mix.py ADDED
@@ -0,0 +1,152 @@
1
+ from typing import Union
2
+
3
+ import torch
4
+ from PIL import Image
5
+ from torchvision import transforms as tfms
6
+ from tqdm.auto import tqdm
7
+ from transformers import CLIPTextModel, CLIPTokenizer
8
+
9
+ from diffusers import (
10
+ AutoencoderKL,
11
+ DDIMScheduler,
12
+ DiffusionPipeline,
13
+ LMSDiscreteScheduler,
14
+ PNDMScheduler,
15
+ UNet2DConditionModel,
16
+ )
17
+
18
+
19
+ class MagicMixPipeline(DiffusionPipeline):
20
+ def __init__(
21
+ self,
22
+ vae: AutoencoderKL,
23
+ text_encoder: CLIPTextModel,
24
+ tokenizer: CLIPTokenizer,
25
+ unet: UNet2DConditionModel,
26
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler],
27
+ ):
28
+ super().__init__()
29
+
30
+ self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
31
+
32
+ # convert PIL image to latents
33
+ def encode(self, img):
34
+ with torch.no_grad():
35
+ latent = self.vae.encode(tfms.ToTensor()(img).unsqueeze(0).to(self.device) * 2 - 1)
36
+ latent = 0.18215 * latent.latent_dist.sample()
37
+ return latent
38
+
39
+ # convert latents to PIL image
40
+ def decode(self, latent):
41
+ latent = (1 / 0.18215) * latent
42
+ with torch.no_grad():
43
+ img = self.vae.decode(latent).sample
44
+ img = (img / 2 + 0.5).clamp(0, 1)
45
+ img = img.detach().cpu().permute(0, 2, 3, 1).numpy()
46
+ img = (img * 255).round().astype("uint8")
47
+ return Image.fromarray(img[0])
48
+
49
+ # convert prompt into text embeddings, also unconditional embeddings
50
+ def prep_text(self, prompt):
51
+ text_input = self.tokenizer(
52
+ prompt,
53
+ padding="max_length",
54
+ max_length=self.tokenizer.model_max_length,
55
+ truncation=True,
56
+ return_tensors="pt",
57
+ )
58
+
59
+ text_embedding = self.text_encoder(text_input.input_ids.to(self.device))[0]
60
+
61
+ uncond_input = self.tokenizer(
62
+ "",
63
+ padding="max_length",
64
+ max_length=self.tokenizer.model_max_length,
65
+ truncation=True,
66
+ return_tensors="pt",
67
+ )
68
+
69
+ uncond_embedding = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
70
+
71
+ return torch.cat([uncond_embedding, text_embedding])
72
+
73
+ def __call__(
74
+ self,
75
+ img: Image.Image,
76
+ prompt: str,
77
+ kmin: float = 0.3,
78
+ kmax: float = 0.6,
79
+ mix_factor: float = 0.5,
80
+ seed: int = 42,
81
+ steps: int = 50,
82
+ guidance_scale: float = 7.5,
83
+ ) -> Image.Image:
84
+ tmin = steps - int(kmin * steps)
85
+ tmax = steps - int(kmax * steps)
86
+
87
+ text_embeddings = self.prep_text(prompt)
88
+
89
+ self.scheduler.set_timesteps(steps)
90
+
91
+ width, height = img.size
92
+ encoded = self.encode(img)
93
+
94
+ torch.manual_seed(seed)
95
+ noise = torch.randn(
96
+ (1, self.unet.config.in_channels, height // 8, width // 8),
97
+ ).to(self.device)
98
+
99
+ latents = self.scheduler.add_noise(
100
+ encoded,
101
+ noise,
102
+ timesteps=self.scheduler.timesteps[tmax],
103
+ )
104
+
105
+ input = torch.cat([latents] * 2)
106
+
107
+ input = self.scheduler.scale_model_input(input, self.scheduler.timesteps[tmax])
108
+
109
+ with torch.no_grad():
110
+ pred = self.unet(
111
+ input,
112
+ self.scheduler.timesteps[tmax],
113
+ encoder_hidden_states=text_embeddings,
114
+ ).sample
115
+
116
+ pred_uncond, pred_text = pred.chunk(2)
117
+ pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
118
+
119
+ latents = self.scheduler.step(pred, self.scheduler.timesteps[tmax], latents).prev_sample
120
+
121
+ for i, t in enumerate(tqdm(self.scheduler.timesteps)):
122
+ if i > tmax:
123
+ if i < tmin: # layout generation phase
124
+ orig_latents = self.scheduler.add_noise(
125
+ encoded,
126
+ noise,
127
+ timesteps=t,
128
+ )
129
+
130
+ input = (
131
+ (mix_factor * latents) + (1 - mix_factor) * orig_latents
132
+ ) # interpolating between layout noise and conditionally generated noise to preserve layout semantics
133
+ input = torch.cat([input] * 2)
134
+
135
+ else: # content generation phase
136
+ input = torch.cat([latents] * 2)
137
+
138
+ input = self.scheduler.scale_model_input(input, t)
139
+
140
+ with torch.no_grad():
141
+ pred = self.unet(
142
+ input,
143
+ t,
144
+ encoder_hidden_states=text_embeddings,
145
+ ).sample
146
+
147
+ pred_uncond, pred_text = pred.chunk(2)
148
+ pred = pred_uncond + guidance_scale * (pred_text - pred_uncond)
149
+
150
+ latents = self.scheduler.step(pred, t, latents).prev_sample
151
+
152
+ return self.decode(latents)
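A usage sketch for `MagicMixPipeline` (a hedged sketch, not part of the diff; the base checkpoint and `custom_pipeline` name are assumptions, the arguments follow `__call__` above):

# Hedged sketch: mix a semantic concept ("bed") into the layout of an input image.
import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",   # assumed base Stable Diffusion checkpoint
    custom_pipeline="magic_mix",        # assumed community pipeline name
    torch_dtype=torch.float16,
).to("cuda")

layout = Image.open("corgi.jpg").convert("RGB")   # hypothetical layout image
mixed = pipe(
    img=layout,
    prompt="bed",        # concept injected during the content generation phase
    kmin=0.3,            # together with kmax, bounds the layout-mixing phase (see __call__)
    kmax=0.6,
    mix_factor=0.5,      # interpolation weight between layout noise and generated latents
    steps=50,
    guidance_scale=7.5,
)
mixed.save("magic_mix_out.png")   # __call__ returns a PIL image directly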
v0.26.3/marigold_depth_estimation.py ADDED
@@ -0,0 +1,601 @@
1
+ # Copyright 2023 Bingxin Ke, ETH Zurich and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # --------------------------------------------------------------------------
15
+ # If you find this code useful, we kindly ask you to cite our paper in your work.
16
+ # Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
17
+ # More information about the method can be found at https://marigoldmonodepth.github.io
18
+ # --------------------------------------------------------------------------
19
+
20
+
21
+ import math
22
+ from typing import Dict, Union
23
+
24
+ import matplotlib
25
+ import numpy as np
26
+ import torch
27
+ from PIL import Image
28
+ from scipy.optimize import minimize
29
+ from torch.utils.data import DataLoader, TensorDataset
30
+ from tqdm.auto import tqdm
31
+ from transformers import CLIPTextModel, CLIPTokenizer
32
+
33
+ from diffusers import (
34
+ AutoencoderKL,
35
+ DDIMScheduler,
36
+ DiffusionPipeline,
37
+ UNet2DConditionModel,
38
+ )
39
+ from diffusers.utils import BaseOutput, check_min_version
40
+
41
+
42
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
43
+ check_min_version("0.26.0")
44
+
45
+ class MarigoldDepthOutput(BaseOutput):
46
+ """
47
+ Output class for Marigold monocular depth prediction pipeline.
48
+
49
+ Args:
50
+ depth_np (`np.ndarray`):
51
+ Predicted depth map, with depth values in the range of [0, 1].
52
+ depth_colored (`PIL.Image.Image`):
53
+ Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
54
+ uncertainty (`None` or `np.ndarray`):
55
+ Uncalibrated uncertainty (MAD, median absolute deviation) coming from ensembling.
56
+ """
57
+
58
+ depth_np: np.ndarray
59
+ depth_colored: Image.Image
60
+ uncertainty: Union[None, np.ndarray]
61
+
62
+
63
+ class MarigoldPipeline(DiffusionPipeline):
64
+ """
65
+ Pipeline for monocular depth estimation using Marigold: https://marigoldmonodepth.github.io.
66
+
67
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
68
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
69
+
70
+ Args:
71
+ unet (`UNet2DConditionModel`):
72
+ Conditional U-Net to denoise the depth latent, conditioned on image latent.
73
+ vae (`AutoencoderKL`):
74
+ Variational Auto-Encoder (VAE) Model to encode and decode images and depth maps
75
+ to and from latent representations.
76
+ scheduler (`DDIMScheduler`):
77
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents.
78
+ text_encoder (`CLIPTextModel`):
79
+ Text-encoder, for empty text embedding.
80
+ tokenizer (`CLIPTokenizer`):
81
+ CLIP tokenizer.
82
+ """
83
+
84
+ rgb_latent_scale_factor = 0.18215
85
+ depth_latent_scale_factor = 0.18215
86
+
87
+ def __init__(
88
+ self,
89
+ unet: UNet2DConditionModel,
90
+ vae: AutoencoderKL,
91
+ scheduler: DDIMScheduler,
92
+ text_encoder: CLIPTextModel,
93
+ tokenizer: CLIPTokenizer,
94
+ ):
95
+ super().__init__()
96
+
97
+ self.register_modules(
98
+ unet=unet,
99
+ vae=vae,
100
+ scheduler=scheduler,
101
+ text_encoder=text_encoder,
102
+ tokenizer=tokenizer,
103
+ )
104
+
105
+ self.empty_text_embed = None
106
+
107
+ @torch.no_grad()
108
+ def __call__(
109
+ self,
110
+ input_image: Image,
111
+ denoising_steps: int = 10,
112
+ ensemble_size: int = 10,
113
+ processing_res: int = 768,
114
+ match_input_res: bool = True,
115
+ batch_size: int = 0,
116
+ color_map: str = "Spectral",
117
+ show_progress_bar: bool = True,
118
+ ensemble_kwargs: Dict = None,
119
+ ) -> MarigoldDepthOutput:
120
+ """
121
+ Function invoked when calling the pipeline.
122
+
123
+ Args:
124
+ input_image (`Image`):
125
+ Input RGB (or gray-scale) image.
126
+ processing_res (`int`, *optional*, defaults to `768`):
127
+ Maximum resolution of processing.
128
+ If set to 0: will not resize at all.
129
+ match_input_res (`bool`, *optional*, defaults to `True`):
130
+ Resize depth prediction to match input resolution.
131
+ Only has an effect if `processing_res` is not 0.
132
+ denoising_steps (`int`, *optional*, defaults to `10`):
133
+ Number of diffusion denoising steps (DDIM) during inference.
134
+ ensemble_size (`int`, *optional*, defaults to `10`):
135
+ Number of predictions to be ensembled.
136
+ batch_size (`int`, *optional*, defaults to `0`):
137
+ Inference batch size, no bigger than `num_ensemble`.
138
+ If set to 0, the script will automatically decide the proper batch size.
139
+ show_progress_bar (`bool`, *optional*, defaults to `True`):
140
+ Display a progress bar of diffusion denoising.
141
+ color_map (`str`, *optional*, defaults to `"Spectral"`):
142
+ Colormap used to colorize the depth map.
143
+ ensemble_kwargs (`dict`, *optional*, defaults to `None`):
144
+ Arguments for detailed ensembling settings.
145
+ Returns:
146
+ `MarigoldDepthOutput`: Output class for Marigold monocular depth prediction pipeline, including:
147
+ - **depth_np** (`np.ndarray`) Predicted depth map, with depth values in the range of [0, 1]
148
+ - **depth_colored** (`PIL.Image.Image`) Colorized depth map, with the shape of [3, H, W] and values in [0, 1]
149
+ - **uncertainty** (`None` or `np.ndarray`) Uncalibrated uncertainty (MAD, median absolute deviation)
150
+ coming from ensembling. None if `ensemble_size = 1`
151
+ """
152
+
153
+ device = self.device
154
+ input_size = input_image.size
155
+
156
+ if not match_input_res:
157
+ assert processing_res is not None, "Value error: `processing_res` must be set when `match_input_res` is False"
158
+ assert processing_res >= 0
159
+ assert denoising_steps >= 1
160
+ assert ensemble_size >= 1
161
+
162
+ # ----------------- Image Preprocess -----------------
163
+ # Resize image
164
+ if processing_res > 0:
165
+ input_image = self.resize_max_res(input_image, max_edge_resolution=processing_res)
166
+ # Convert the image to RGB to (1) remove the alpha channel and (2) convert B&W to 3-channel
167
+ input_image = input_image.convert("RGB")
168
+ image = np.asarray(input_image)
169
+
170
+ # Normalize rgb values
171
+ rgb = np.transpose(image, (2, 0, 1)) # [H, W, rgb] -> [rgb, H, W]
172
+ rgb_norm = rgb / 255.0
173
+ rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
174
+ rgb_norm = rgb_norm.to(device)
175
+ assert rgb_norm.min() >= 0.0 and rgb_norm.max() <= 1.0
176
+
177
+ # ----------------- Predicting depth -----------------
178
+ # Batch repeated input image
179
+ duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
180
+ single_rgb_dataset = TensorDataset(duplicated_rgb)
181
+ if batch_size > 0:
182
+ _bs = batch_size
183
+ else:
184
+ _bs = self._find_batch_size(
185
+ ensemble_size=ensemble_size,
186
+ input_res=max(rgb_norm.shape[1:]),
187
+ dtype=self.dtype,
188
+ )
189
+
190
+ single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
191
+
192
+ # Predict depth maps (batched)
193
+ depth_pred_ls = []
194
+ if show_progress_bar:
195
+ iterable = tqdm(single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False)
196
+ else:
197
+ iterable = single_rgb_loader
198
+ for batch in iterable:
199
+ (batched_img,) = batch
200
+ depth_pred_raw = self.single_infer(
201
+ rgb_in=batched_img,
202
+ num_inference_steps=denoising_steps,
203
+ show_pbar=show_progress_bar,
204
+ )
205
+ depth_pred_ls.append(depth_pred_raw.detach().clone())
206
+ depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze()
207
+ torch.cuda.empty_cache() # clear vram cache for ensembling
208
+
209
+ # ----------------- Test-time ensembling -----------------
210
+ if ensemble_size > 1:
211
+ depth_pred, pred_uncert = self.ensemble_depths(depth_preds, **(ensemble_kwargs or {}))
212
+ else:
213
+ depth_pred = depth_preds
214
+ pred_uncert = None
215
+
216
+ # ----------------- Post processing -----------------
217
+ # Scale prediction to [0, 1]
218
+ min_d = torch.min(depth_pred)
219
+ max_d = torch.max(depth_pred)
220
+ depth_pred = (depth_pred - min_d) / (max_d - min_d)
221
+
222
+ # Convert to numpy
223
+ depth_pred = depth_pred.cpu().numpy().astype(np.float32)
224
+
225
+ # Resize back to original resolution
226
+ if match_input_res:
227
+ pred_img = Image.fromarray(depth_pred)
228
+ pred_img = pred_img.resize(input_size)
229
+ depth_pred = np.asarray(pred_img)
230
+
231
+ # Clip output range
232
+ depth_pred = depth_pred.clip(0, 1)
233
+
234
+ # Colorize
235
+ depth_colored = self.colorize_depth_maps(
236
+ depth_pred, 0, 1, cmap=color_map
237
+ ).squeeze() # [3, H, W], value in (0, 1)
238
+ depth_colored = (depth_colored * 255).astype(np.uint8)
239
+ depth_colored_hwc = self.chw2hwc(depth_colored)
240
+ depth_colored_img = Image.fromarray(depth_colored_hwc)
241
+ return MarigoldDepthOutput(
242
+ depth_np=depth_pred,
243
+ depth_colored=depth_colored_img,
244
+ uncertainty=pred_uncert,
245
+ )
246
+
247
+ def _encode_empty_text(self):
248
+ """
249
+ Encode text embedding for empty prompt.
250
+ """
251
+ prompt = ""
252
+ text_inputs = self.tokenizer(
253
+ prompt,
254
+ padding="do_not_pad",
255
+ max_length=self.tokenizer.model_max_length,
256
+ truncation=True,
257
+ return_tensors="pt",
258
+ )
259
+ text_input_ids = text_inputs.input_ids.to(self.text_encoder.device)
260
+ self.empty_text_embed = self.text_encoder(text_input_ids)[0].to(self.dtype)
261
+
262
+ @torch.no_grad()
263
+ def single_infer(self, rgb_in: torch.Tensor, num_inference_steps: int, show_pbar: bool) -> torch.Tensor:
264
+ """
265
+ Perform an individual depth prediction without ensembling.
266
+
267
+ Args:
268
+ rgb_in (`torch.Tensor`):
269
+ Input RGB image.
270
+ num_inference_steps (`int`):
271
+ Number of diffusion denoising steps (DDIM) during inference.
272
+ show_pbar (`bool`):
273
+ Display a progress bar of diffusion denoising.
274
+ Returns:
275
+ `torch.Tensor`: Predicted depth map.
276
+ """
277
+ device = rgb_in.device
278
+
279
+ # Set timesteps
280
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
281
+ timesteps = self.scheduler.timesteps # [T]
282
+
283
+ # Encode image
284
+ rgb_latent = self._encode_rgb(rgb_in)
285
+
286
+ # Initial depth map (noise)
287
+ depth_latent = torch.randn(rgb_latent.shape, device=device, dtype=self.dtype) # [B, 4, h, w]
288
+
289
+ # Batched empty text embedding
290
+ if self.empty_text_embed is None:
291
+ self._encode_empty_text()
292
+ batch_empty_text_embed = self.empty_text_embed.repeat((rgb_latent.shape[0], 1, 1)) # [B, 2, 1024]
293
+
294
+ # Denoising loop
295
+ if show_pbar:
296
+ iterable = tqdm(
297
+ enumerate(timesteps),
298
+ total=len(timesteps),
299
+ leave=False,
300
+ desc=" " * 4 + "Diffusion denoising",
301
+ )
302
+ else:
303
+ iterable = enumerate(timesteps)
304
+
305
+ for i, t in iterable:
306
+ unet_input = torch.cat([rgb_latent, depth_latent], dim=1) # this order is important
307
+
308
+ # predict the noise residual
309
+ noise_pred = self.unet(unet_input, t, encoder_hidden_states=batch_empty_text_embed).sample # [B, 4, h, w]
310
+
311
+ # compute the previous noisy sample x_t -> x_t-1
312
+ depth_latent = self.scheduler.step(noise_pred, t, depth_latent).prev_sample
313
+ torch.cuda.empty_cache()
314
+ depth = self._decode_depth(depth_latent)
315
+
316
+ # clip prediction
317
+ depth = torch.clip(depth, -1.0, 1.0)
318
+ # shift to [0, 1]
319
+ depth = (depth + 1.0) / 2.0
320
+
321
+ return depth
322
+
323
+ def _encode_rgb(self, rgb_in: torch.Tensor) -> torch.Tensor:
324
+ """
325
+ Encode RGB image into latent.
326
+
327
+ Args:
328
+ rgb_in (`torch.Tensor`):
329
+ Input RGB image to be encoded.
330
+
331
+ Returns:
332
+ `torch.Tensor`: Image latent.
333
+ """
334
+ # encode
335
+ h = self.vae.encoder(rgb_in)
336
+ moments = self.vae.quant_conv(h)
337
+ mean, logvar = torch.chunk(moments, 2, dim=1)
338
+ # scale latent
339
+ rgb_latent = mean * self.rgb_latent_scale_factor
340
+ return rgb_latent
341
+
342
+ def _decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
343
+ """
344
+ Decode depth latent into depth map.
345
+
346
+ Args:
347
+ depth_latent (`torch.Tensor`):
348
+ Depth latent to be decoded.
349
+
350
+ Returns:
351
+ `torch.Tensor`: Decoded depth map.
352
+ """
353
+ # scale latent
354
+ depth_latent = depth_latent / self.depth_latent_scale_factor
355
+ # decode
356
+ z = self.vae.post_quant_conv(depth_latent)
357
+ stacked = self.vae.decoder(z)
358
+ # mean of output channels
359
+ depth_mean = stacked.mean(dim=1, keepdim=True)
360
+ return depth_mean
361
+
362
+ @staticmethod
363
+ def resize_max_res(img: Image.Image, max_edge_resolution: int) -> Image.Image:
364
+ """
365
+ Resize image to limit maximum edge length while keeping aspect ratio.
366
+
367
+ Args:
368
+ img (`Image.Image`):
369
+ Image to be resized.
370
+ max_edge_resolution (`int`):
371
+ Maximum edge length (pixel).
372
+
373
+ Returns:
374
+ `Image.Image`: Resized image.
375
+ """
376
+ original_width, original_height = img.size
377
+ downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height)
378
+
379
+ new_width = int(original_width * downscale_factor)
380
+ new_height = int(original_height * downscale_factor)
381
+
382
+ resized_img = img.resize((new_width, new_height))
383
+ return resized_img
384
+
385
+ @staticmethod
386
+ def colorize_depth_maps(depth_map, min_depth, max_depth, cmap="Spectral", valid_mask=None):
387
+ """
388
+ Colorize depth maps.
389
+ """
390
+ assert len(depth_map.shape) >= 2, "Invalid dimension"
391
+
392
+ if isinstance(depth_map, torch.Tensor):
393
+ depth = depth_map.detach().clone().squeeze().numpy()
394
+ elif isinstance(depth_map, np.ndarray):
395
+ depth = depth_map.copy().squeeze()
396
+ # reshape to [ (B,) H, W ]
397
+ if depth.ndim < 3:
398
+ depth = depth[np.newaxis, :, :]
399
+
400
+ # colorize
401
+ cm = matplotlib.colormaps[cmap]
402
+ depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
403
+ img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3] # value from 0 to 1
404
+ img_colored_np = np.rollaxis(img_colored_np, 3, 1)
405
+
406
+ if valid_mask is not None:
407
+ if isinstance(depth_map, torch.Tensor):
408
+ valid_mask = valid_mask.detach().numpy()
409
+ valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]
410
+ if valid_mask.ndim < 3:
411
+ valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
412
+ else:
413
+ valid_mask = valid_mask[:, np.newaxis, :, :]
414
+ valid_mask = np.repeat(valid_mask, 3, axis=1)
415
+ img_colored_np[~valid_mask] = 0
416
+
417
+ if isinstance(depth_map, torch.Tensor):
418
+ img_colored = torch.from_numpy(img_colored_np).float()
419
+ elif isinstance(depth_map, np.ndarray):
420
+ img_colored = img_colored_np
421
+
422
+ return img_colored
423
+
424
+ @staticmethod
425
+ def chw2hwc(chw):
426
+ assert 3 == len(chw.shape)
427
+ if isinstance(chw, torch.Tensor):
428
+ hwc = torch.permute(chw, (1, 2, 0))
429
+ elif isinstance(chw, np.ndarray):
430
+ hwc = np.moveaxis(chw, 0, -1)
431
+ return hwc
432
+
433
+ @staticmethod
434
+ def _find_batch_size(ensemble_size: int, input_res: int, dtype: torch.dtype) -> int:
435
+ """
436
+ Automatically search for suitable operating batch size.
437
+
438
+ Args:
439
+ ensemble_size (`int`):
440
+ Number of predictions to be ensembled.
441
+ input_res (`int`):
442
+ Operating resolution of the input image.
443
+
444
+ Returns:
445
+ `int`: Operating batch size.
446
+ """
447
+ # Search table for suggested max. inference batch size
448
+ bs_search_table = [
449
+ # tested on A100-PCIE-80GB
450
+ {"res": 768, "total_vram": 79, "bs": 35, "dtype": torch.float32},
451
+ {"res": 1024, "total_vram": 79, "bs": 20, "dtype": torch.float32},
452
+ # tested on A100-PCIE-40GB
453
+ {"res": 768, "total_vram": 39, "bs": 15, "dtype": torch.float32},
454
+ {"res": 1024, "total_vram": 39, "bs": 8, "dtype": torch.float32},
455
+ {"res": 768, "total_vram": 39, "bs": 30, "dtype": torch.float16},
456
+ {"res": 1024, "total_vram": 39, "bs": 15, "dtype": torch.float16},
457
+ # tested on RTX3090, RTX4090
458
+ {"res": 512, "total_vram": 23, "bs": 20, "dtype": torch.float32},
459
+ {"res": 768, "total_vram": 23, "bs": 7, "dtype": torch.float32},
460
+ {"res": 1024, "total_vram": 23, "bs": 3, "dtype": torch.float32},
461
+ {"res": 512, "total_vram": 23, "bs": 40, "dtype": torch.float16},
462
+ {"res": 768, "total_vram": 23, "bs": 18, "dtype": torch.float16},
463
+ {"res": 1024, "total_vram": 23, "bs": 10, "dtype": torch.float16},
464
+ # tested on GTX1080Ti
465
+ {"res": 512, "total_vram": 10, "bs": 5, "dtype": torch.float32},
466
+ {"res": 768, "total_vram": 10, "bs": 2, "dtype": torch.float32},
467
+ {"res": 512, "total_vram": 10, "bs": 10, "dtype": torch.float16},
468
+ {"res": 768, "total_vram": 10, "bs": 5, "dtype": torch.float16},
469
+ {"res": 1024, "total_vram": 10, "bs": 3, "dtype": torch.float16},
470
+ ]
471
+
472
+ if not torch.cuda.is_available():
473
+ return 1
474
+
475
+ total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3
476
+ filtered_bs_search_table = [s for s in bs_search_table if s["dtype"] == dtype]
477
+ for settings in sorted(
478
+ filtered_bs_search_table,
479
+ key=lambda k: (k["res"], -k["total_vram"]),
480
+ ):
481
+ if input_res <= settings["res"] and total_vram >= settings["total_vram"]:
482
+ bs = settings["bs"]
483
+ if bs > ensemble_size:
484
+ bs = ensemble_size
485
+ elif bs > math.ceil(ensemble_size / 2) and bs < ensemble_size:
486
+ bs = math.ceil(ensemble_size / 2)
487
+ return bs
488
+
489
+ return 1
490
+
491
+ @staticmethod
492
+ def ensemble_depths(
493
+ input_images: torch.Tensor,
494
+ regularizer_strength: float = 0.02,
495
+ max_iter: int = 2,
496
+ tol: float = 1e-3,
497
+ reduction: str = "median",
498
+ max_res: int = None,
499
+ ):
500
+ """
501
+ To ensemble multiple affine-invariant depth images (up to scale and shift),
502
+ by estimating a per-image scale and shift that align them
503
+ """
504
+
505
+ def inter_distances(tensors: torch.Tensor):
506
+ """
507
+ To calculate the distance between each pair of depth maps.
508
+ """
509
+ distances = []
510
+ for i, j in torch.combinations(torch.arange(tensors.shape[0])):
511
+ arr1 = tensors[i : i + 1]
512
+ arr2 = tensors[j : j + 1]
513
+ distances.append(arr1 - arr2)
514
+ dist = torch.concatenate(distances, dim=0)
515
+ return dist
516
+
517
+ device = input_images.device
518
+ dtype = input_images.dtype
519
+ np_dtype = np.float32
520
+
521
+ original_input = input_images.clone()
522
+ n_img = input_images.shape[0]
523
+ ori_shape = input_images.shape
524
+
525
+ if max_res is not None:
526
+ scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))
527
+ if scale_factor < 1:
528
+ downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")
529
+ input_images = downscaler(torch.from_numpy(input_images)).numpy()
530
+
531
+ # init guess
532
+ _min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
533
+ _max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)
534
+ s_init = 1.0 / (_max - _min).reshape((-1, 1, 1))
535
+ t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1))
536
+ x = np.concatenate([s_init, t_init]).reshape(-1).astype(np_dtype)
537
+
538
+ input_images = input_images.to(device)
539
+
540
+ # objective function
541
+ def closure(x):
542
+ l = len(x)
543
+ s = x[: int(l / 2)]
544
+ t = x[int(l / 2) :]
545
+ s = torch.from_numpy(s).to(dtype=dtype).to(device)
546
+ t = torch.from_numpy(t).to(dtype=dtype).to(device)
547
+
548
+ transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))
549
+ dists = inter_distances(transformed_arrays)
550
+ sqrt_dist = torch.sqrt(torch.mean(dists**2))
551
+
552
+ if "mean" == reduction:
553
+ pred = torch.mean(transformed_arrays, dim=0)
554
+ elif "median" == reduction:
555
+ pred = torch.median(transformed_arrays, dim=0).values
556
+ else:
557
+ raise ValueError
558
+
559
+ near_err = torch.sqrt((0 - torch.min(pred)) ** 2)
560
+ far_err = torch.sqrt((1 - torch.max(pred)) ** 2)
561
+
562
+ err = sqrt_dist + (near_err + far_err) * regularizer_strength
563
+ err = err.detach().cpu().numpy().astype(np_dtype)
564
+ return err
565
+
566
+ res = minimize(
567
+ closure,
568
+ x,
569
+ method="BFGS",
570
+ tol=tol,
571
+ options={"maxiter": max_iter, "disp": False},
572
+ )
573
+ x = res.x
574
+ l = len(x)
575
+ s = x[: int(l / 2)]
576
+ t = x[int(l / 2) :]
577
+
578
+ # Prediction
579
+ s = torch.from_numpy(s).to(dtype=dtype).to(device)
580
+ t = torch.from_numpy(t).to(dtype=dtype).to(device)
581
+ transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1)
582
+ if "mean" == reduction:
583
+ aligned_images = torch.mean(transformed_arrays, dim=0)
584
+ std = torch.std(transformed_arrays, dim=0)
585
+ uncertainty = std
586
+ elif "median" == reduction:
587
+ aligned_images = torch.median(transformed_arrays, dim=0).values
588
+ # MAD (median absolute deviation) as uncertainty indicator
589
+ abs_dev = torch.abs(transformed_arrays - aligned_images)
590
+ mad = torch.median(abs_dev, dim=0).values
591
+ uncertainty = mad
592
+ else:
593
+ raise ValueError(f"Unknown reduction method: {reduction}")
594
+
595
+ # Scale and shift to [0, 1]
596
+ _min = torch.min(aligned_images)
597
+ _max = torch.max(aligned_images)
598
+ aligned_images = (aligned_images - _min) / (_max - _min)
599
+ uncertainty /= _max - _min
600
+
601
+ return aligned_images, uncertainty
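A usage sketch for `MarigoldPipeline` (a hedged sketch, not part of the diff; the checkpoint id and `custom_pipeline` name are assumptions, the call arguments and output fields come from `__call__` and `MarigoldDepthOutput` above):

# Hedged sketch: monocular depth estimation with ensembled DDIM predictions.
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "prs-eth/marigold-v1-0",                      # assumed Marigold checkpoint id
    custom_pipeline="marigold_depth_estimation",  # assumed community pipeline name
).to("cuda")

rgb = Image.open("room.jpg")
out = pipe(
    rgb,
    denoising_steps=10,    # DDIM steps per single prediction
    ensemble_size=10,      # predictions combined by ensemble_depths()
    processing_res=768,    # longest edge used during inference
    match_input_res=True,  # resize the prediction back to the input size
)
out.depth_colored.save("room_depth_colored.png")  # colorized PIL image
depth = out.depth_np                              # float32 numpy array in [0, 1]
uncert = out.uncertainty                          # MAD map, None if ensemble_size == 1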
v0.26.3/masked_stable_diffusion_img2img.py ADDED
@@ -0,0 +1,262 @@
1
+ from typing import Any, Callable, Dict, List, Optional, Union
2
+
3
+ import numpy as np
4
+ import PIL.Image
5
+ import torch
6
+
7
+ from diffusers import StableDiffusionImg2ImgPipeline
8
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
9
+
10
+
11
+ class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
12
+ debug_save = False
13
+
14
+ @torch.no_grad()
15
+ def __call__(
16
+ self,
17
+ prompt: Union[str, List[str]] = None,
18
+ image: Union[
19
+ torch.FloatTensor,
20
+ PIL.Image.Image,
21
+ np.ndarray,
22
+ List[torch.FloatTensor],
23
+ List[PIL.Image.Image],
24
+ List[np.ndarray],
25
+ ] = None,
26
+ strength: float = 0.8,
27
+ num_inference_steps: Optional[int] = 50,
28
+ guidance_scale: Optional[float] = 7.5,
29
+ negative_prompt: Optional[Union[str, List[str]]] = None,
30
+ num_images_per_prompt: Optional[int] = 1,
31
+ eta: Optional[float] = 0.0,
32
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
33
+ prompt_embeds: Optional[torch.FloatTensor] = None,
34
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
35
+ output_type: Optional[str] = "pil",
36
+ return_dict: bool = True,
37
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
38
+ callback_steps: int = 1,
39
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
40
+ mask: Union[
41
+ torch.FloatTensor,
42
+ PIL.Image.Image,
43
+ np.ndarray,
44
+ List[torch.FloatTensor],
45
+ List[PIL.Image.Image],
46
+ List[np.ndarray],
47
+ ] = None,
48
+ ):
49
+ r"""
50
+ The call function to the pipeline for generation.
51
+
52
+ Args:
53
+ prompt (`str` or `List[str]`, *optional*):
54
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
55
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
56
+ `Image` or tensor representing an image batch to be used as the starting point. Can also accept image
57
+ latents as `image`, but if passing latents directly it is not encoded again.
58
+ strength (`float`, *optional*, defaults to 0.8):
59
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
60
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
61
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
62
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
63
+ essentially ignores `image`.
64
+ num_inference_steps (`int`, *optional*, defaults to 50):
65
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
66
+ expense of slower inference. This parameter is modulated by `strength`.
67
+ guidance_scale (`float`, *optional*, defaults to 7.5):
68
+ A higher guidance scale value encourages the model to generate images closely linked to the text
69
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
70
+ negative_prompt (`str` or `List[str]`, *optional*):
71
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
72
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
73
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
74
+ The number of images to generate per prompt.
75
+ eta (`float`, *optional*, defaults to 0.0):
76
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
77
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
78
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
79
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
80
+ generation deterministic.
81
+ prompt_embeds (`torch.FloatTensor`, *optional*):
82
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
83
+ provided, text embeddings are generated from the `prompt` input argument.
84
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
85
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
86
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
87
+ output_type (`str`, *optional*, defaults to `"pil"`):
88
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
89
+ return_dict (`bool`, *optional*, defaults to `True`):
90
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
91
+ plain tuple.
92
+ callback (`Callable`, *optional*):
93
+ A function that calls every `callback_steps` steps during inference. The function is called with the
94
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
95
+ callback_steps (`int`, *optional*, defaults to 1):
96
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
97
+ every step.
98
+ cross_attention_kwargs (`dict`, *optional*):
99
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
100
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
101
+ mask (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`, *optional*):
102
+ A mask with non-zero elements for the area to be inpainted. If not specified, no mask is applied.
103
+ Examples:
104
+
105
+ Returns:
106
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
107
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
108
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
109
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
110
+ "not-safe-for-work" (nsfw) content.
111
+ """
112
+ # code adapted from parent class StableDiffusionImg2ImgPipeline
113
+
114
+ # 0. Check inputs. Raise error if not correct
115
+ self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
116
+
117
+ # 1. Define call parameters
118
+ if prompt is not None and isinstance(prompt, str):
119
+ batch_size = 1
120
+ elif prompt is not None and isinstance(prompt, list):
121
+ batch_size = len(prompt)
122
+ else:
123
+ batch_size = prompt_embeds.shape[0]
124
+ device = self._execution_device
125
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
126
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
127
+ # corresponds to doing no classifier free guidance.
128
+ do_classifier_free_guidance = guidance_scale > 1.0
129
+
130
+ # 2. Encode input prompt
131
+ text_encoder_lora_scale = (
132
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
133
+ )
134
+ prompt_embeds = self._encode_prompt(
135
+ prompt,
136
+ device,
137
+ num_images_per_prompt,
138
+ do_classifier_free_guidance,
139
+ negative_prompt,
140
+ prompt_embeds=prompt_embeds,
141
+ negative_prompt_embeds=negative_prompt_embeds,
142
+ lora_scale=text_encoder_lora_scale,
143
+ )
144
+
145
+ # 3. Preprocess image
146
+ image = self.image_processor.preprocess(image)
147
+
148
+ # 4. set timesteps
149
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
150
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
151
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
152
+
153
+ # 5. Prepare latent variables
154
+ # it is sampled from the latent distribution of the VAE
155
+ latents = self.prepare_latents(
156
+ image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
157
+ )
158
+
159
+ # mean of the latent distribution
160
+ init_latents = [
161
+ self.vae.encode(image.to(device=device, dtype=prompt_embeds.dtype)[i : i + 1]).latent_dist.mean
162
+ for i in range(batch_size)
163
+ ]
164
+ init_latents = torch.cat(init_latents, dim=0)
165
+
166
+ # 6. create latent mask
167
+ latent_mask = self._make_latent_mask(latents, mask)
168
+
169
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
170
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
171
+
172
+ # 8. Denoising loop
173
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
174
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
175
+ for i, t in enumerate(timesteps):
176
+ # expand the latents if we are doing classifier free guidance
177
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
178
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
179
+
180
+ # predict the noise residual
181
+ noise_pred = self.unet(
182
+ latent_model_input,
183
+ t,
184
+ encoder_hidden_states=prompt_embeds,
185
+ cross_attention_kwargs=cross_attention_kwargs,
186
+ return_dict=False,
187
+ )[0]
188
+
189
+ # perform guidance
190
+ if do_classifier_free_guidance:
191
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
192
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
193
+
194
+ if latent_mask is not None:
195
+ latents = torch.lerp(init_latents * self.vae.config.scaling_factor, latents, latent_mask)
196
+ noise_pred = torch.lerp(torch.zeros_like(noise_pred), noise_pred, latent_mask)
197
+
198
+ # compute the previous noisy sample x_t -> x_t-1
199
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
200
+
201
+ # call the callback, if provided
202
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
203
+ progress_bar.update()
204
+ if callback is not None and i % callback_steps == 0:
205
+ step_idx = i // getattr(self.scheduler, "order", 1)
206
+ callback(step_idx, t, latents)
207
+
208
+ if not output_type == "latent":
209
+ scaled = latents / self.vae.config.scaling_factor
210
+ if latent_mask is not None:
211
+ # scaled = latents / self.vae.config.scaling_factor * latent_mask + init_latents * (1 - latent_mask)
212
+ scaled = torch.lerp(init_latents, scaled, latent_mask)
213
+ image = self.vae.decode(scaled, return_dict=False)[0]
214
+ if self.debug_save:
215
+ image_gen = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
216
+ image_gen = self.image_processor.postprocess(image_gen, output_type=output_type, do_denormalize=[True])
217
+ image_gen[0].save("from_latent.png")
218
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
219
+ else:
220
+ image = latents
221
+ has_nsfw_concept = None
222
+
223
+ if has_nsfw_concept is None:
224
+ do_denormalize = [True] * image.shape[0]
225
+ else:
226
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
227
+
228
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
229
+
230
+ # Offload last model to CPU
231
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
232
+ self.final_offload_hook.offload()
233
+
234
+ if not return_dict:
235
+ return (image, has_nsfw_concept)
236
+
237
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
238
+
239
+ def _make_latent_mask(self, latents, mask):
240
+ if mask is not None:
241
+ latent_mask = []
242
+ if not isinstance(mask, list):
243
+ tmp_mask = [mask]
244
+ else:
245
+ tmp_mask = mask
246
+ _, l_channels, l_height, l_width = latents.shape
247
+ for m in tmp_mask:
248
+ if not isinstance(m, PIL.Image.Image):
249
+ if len(m.shape) == 2:
250
+ m = m[..., np.newaxis]
251
+ if m.max() > 1:
252
+ m = m / 255.0
253
+ m = self.image_processor.numpy_to_pil(m)[0]
254
+ if m.mode != "L":
255
+ m = m.convert("L")
256
+ resized = self.image_processor.resize(m, l_height, l_width)
257
+ if self.debug_save:
258
+ resized.save("latent_mask.png")
259
+ latent_mask.append(np.repeat(np.array(resized)[np.newaxis, :, :], l_channels, axis=0))
260
+ latent_mask = torch.as_tensor(np.stack(latent_mask)).to(latents)
261
+ latent_mask = latent_mask / latent_mask.max()
262
+ return latent_mask
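A usage sketch for `MaskedStableDiffusionImg2ImgPipeline` (a hedged sketch, not part of the diff; the checkpoint and `custom_pipeline` name are assumptions; per `_make_latent_mask` above, non-zero mask pixels mark the region that is allowed to change):

# Hedged sketch: img2img restricted to a masked region.
import numpy as np
import torch
from PIL import Image
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",                    # assumed checkpoint
    custom_pipeline="masked_stable_diffusion_img2img",   # assumed community pipeline name
    torch_dtype=torch.float16,
).to("cuda")

init = Image.open("portrait.png").convert("RGB")
mask = np.zeros((init.height, init.width), dtype=np.uint8)
mask[100:300, 150:350] = 255   # hypothetical region to repaint; the rest is preserved

result = pipe(
    prompt="a portrait wearing sunglasses",
    image=init,
    mask=mask,
    strength=0.75,
    num_inference_steps=50,
)
result.images[0].save("masked_img2img_out.png")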
v0.26.3/mixture_canvas.py ADDED
@@ -0,0 +1,501 @@
1
+ import re
2
+ from copy import deepcopy
3
+ from dataclasses import asdict, dataclass
4
+ from enum import Enum
5
+ from typing import List, Optional, Union
6
+
7
+ import numpy as np
8
+ import torch
9
+ from numpy import exp, pi, sqrt
10
+ from torchvision.transforms.functional import resize
11
+ from tqdm.auto import tqdm
12
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
13
+
14
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
15
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
16
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
17
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
18
+
19
+
20
+ def preprocess_image(image):
21
+ from PIL import Image
22
+
23
+ """Preprocess an input image
24
+
25
+ Same as
26
+ https://github.com/huggingface/diffusers/blob/1138d63b519e37f0ce04e027b9f4a3261d27c628/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L44
27
+ """
28
+ w, h = image.size
29
+ w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
30
+ image = image.resize((w, h), resample=Image.LANCZOS)
31
+ image = np.array(image).astype(np.float32) / 255.0
32
+ image = image[None].transpose(0, 3, 1, 2)
33
+ image = torch.from_numpy(image)
34
+ return 2.0 * image - 1.0
35
+
36
+
37
+ @dataclass
38
+ class CanvasRegion:
39
+ """Class defining a rectangular region in the canvas"""
40
+
41
+ row_init: int # Region starting row in pixel space (included)
42
+ row_end: int # Region end row in pixel space (not included)
43
+ col_init: int # Region starting column in pixel space (included)
44
+ col_end: int # Region end column in pixel space (not included)
45
+ region_seed: int = None # Seed for random operations in this region
46
+ noise_eps: float = 0.0 # Deviation of a zero-mean gaussian noise to be applied over the latents in this region. Useful for slightly "rerolling" latents
47
+
48
+ def __post_init__(self):
49
+ # Initialize arguments if not specified
50
+ if self.region_seed is None:
51
+ self.region_seed = np.random.randint(9999999999)
52
+ # Check coordinates are non-negative
53
+ for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
54
+ if coord < 0:
55
+ raise ValueError(
56
+ f"A CanvasRegion must be defined with non-negative indices, found ({self.row_init}, {self.row_end}, {self.col_init}, {self.col_end})"
57
+ )
58
+ # Check coordinates are divisible by 8, else we end up with nasty rounding error when mapping to latent space
59
+ for coord in [self.row_init, self.row_end, self.col_init, self.col_end]:
60
+ if coord // 8 != coord / 8:
61
+ raise ValueError(
62
+ f"A CanvasRegion must be defined with locations divisible by 8, found ({self.row_init}-{self.row_end}, {self.col_init}-{self.col_end})"
63
+ )
64
+ # Check noise eps is non-negative
65
+ if self.noise_eps < 0:
66
+ raise ValueError(f"A CanvasRegion must be defined noises eps non-negative, found {self.noise_eps}")
67
+ # Compute coordinates for this region in latent space
68
+ self.latent_row_init = self.row_init // 8
69
+ self.latent_row_end = self.row_end // 8
70
+ self.latent_col_init = self.col_init // 8
71
+ self.latent_col_end = self.col_end // 8
72
+
73
+ @property
74
+ def width(self):
75
+ return self.col_end - self.col_init
76
+
77
+ @property
78
+ def height(self):
79
+ return self.row_end - self.row_init
80
+
81
+ def get_region_generator(self, device="cpu"):
82
+ """Creates a torch.Generator based on the random seed of this region"""
83
+ # Initialize region generator
84
+ return torch.Generator(device).manual_seed(self.region_seed)
85
+
86
+ @property
87
+ def __dict__(self):
88
+ return asdict(self)
89
+
90
+
91
+ class MaskModes(Enum):
92
+ """Modes in which the influence of diffuser is masked"""
93
+
94
+ CONSTANT = "constant"
95
+ GAUSSIAN = "gaussian"
96
+ QUARTIC = "quartic" # See https://en.wikipedia.org/wiki/Kernel_(statistics)
97
+
98
+
99
+ @dataclass
100
+ class DiffusionRegion(CanvasRegion):
101
+ """Abstract class defining a region where some class of diffusion process is acting"""
102
+
103
+ pass
104
+
105
+
106
+ @dataclass
107
+ class Text2ImageRegion(DiffusionRegion):
108
+ """Class defining a region where a text guided diffusion process is acting"""
109
+
110
+ prompt: str = "" # Text prompt guiding the diffuser in this region
111
+ guidance_scale: float = 7.5 # Guidance scale of the diffuser in this region. If None, randomize
112
+ mask_type: MaskModes = MaskModes.GAUSSIAN.value # Kind of weight mask applied to this region
113
+ mask_weight: float = 1.0 # Global weights multiplier of the mask
114
+ tokenized_prompt = None # Tokenized prompt
115
+ encoded_prompt = None # Encoded prompt
116
+
117
+ def __post_init__(self):
118
+ super().__post_init__()
119
+ # Mask weight cannot be negative
120
+ if self.mask_weight < 0:
121
+ raise ValueError(
122
+ f"A Text2ImageRegion must be defined with non-negative mask weight, found {self.mask_weight}"
123
+ )
124
+ # Mask type must be an actual known mask
125
+ if self.mask_type not in [e.value for e in MaskModes]:
126
+ raise ValueError(
127
+ f"A Text2ImageRegion was defined with mask {self.mask_type}, which is not an accepted mask ({[e.value for e in MaskModes]})"
128
+ )
129
+ # Randomize arguments if given as None
130
+ if self.guidance_scale is None:
131
+ self.guidance_scale = np.random.randint(5, 30)
132
+ # Clean prompt
133
+ self.prompt = re.sub(" +", " ", self.prompt).replace("\n", " ")
134
+
135
+ def tokenize_prompt(self, tokenizer):
136
+ """Tokenizes the prompt for this diffusion region using a given tokenizer"""
137
+ self.tokenized_prompt = tokenizer(
138
+ self.prompt,
139
+ padding="max_length",
140
+ max_length=tokenizer.model_max_length,
141
+ truncation=True,
142
+ return_tensors="pt",
143
+ )
144
+
145
+ def encode_prompt(self, text_encoder, device):
146
+ """Encodes the previously tokenized prompt for this diffusion region using a given encoder"""
147
+ assert self.tokenized_prompt is not None, ValueError(
148
+ "Prompt in diffusion region must be tokenized before encoding"
149
+ )
150
+ self.encoded_prompt = text_encoder(self.tokenized_prompt.input_ids.to(device))[0]
151
+
152
+
153
+ @dataclass
154
+ class Image2ImageRegion(DiffusionRegion):
155
+ """Class defining a region where an image guided diffusion process is acting"""
156
+
157
+ reference_image: torch.FloatTensor = None
158
+ strength: float = 0.8 # Strength of the image
159
+
160
+ def __post_init__(self):
161
+ super().__post_init__()
162
+ if self.reference_image is None:
163
+ raise ValueError("Must provide a reference image when creating an Image2ImageRegion")
164
+ if self.strength < 0 or self.strength > 1:
165
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {self.strength}")
166
+ # Rescale image to region shape
167
+ self.reference_image = resize(self.reference_image, size=[self.height, self.width])
168
+
169
+ def encode_reference_image(self, encoder, device, generator, cpu_vae=False):
170
+ """Encodes the reference image for this Image2Image region into the latent space"""
171
+ # Place encoder in CPU or not following the parameter cpu_vae
172
+ if cpu_vae:
173
+ # Note here we use mean instead of sample, to avoid moving also generator to CPU, which is troublesome
174
+ self.reference_latents = encoder.cpu().encode(self.reference_image).latent_dist.mean.to(device)
175
+ else:
176
+ self.reference_latents = encoder.encode(self.reference_image.to(device)).latent_dist.sample(
177
+ generator=generator
178
+ )
179
+ self.reference_latents = 0.18215 * self.reference_latents
180
+
181
+ @property
182
+ def __dict__(self):
183
+ # This class requires special casting to dict because of the reference_image tensor. Otherwise it cannot be cast to JSON
184
+
185
+ # Get all basic fields from parent class
186
+ super_fields = {key: getattr(self, key) for key in DiffusionRegion.__dataclass_fields__.keys()}
187
+ # Pack other fields
188
+ return {**super_fields, "reference_image": self.reference_image.cpu().tolist(), "strength": self.strength}
189
+
190
+
191
+ class RerollModes(Enum):
192
+ """Modes in which the reroll regions operate"""
193
+
194
+ RESET = "reset" # Completely reset the random noise in the region
195
+ EPSILON = "epsilon" # Alter slightly the latents in the region
196
+
197
+
198
+ @dataclass
199
+ class RerollRegion(CanvasRegion):
200
+ """Class defining a rectangular canvas region in which initial latent noise will be rerolled"""
201
+
202
+ reroll_mode: RerollModes = RerollModes.RESET.value
203
+
204
+
205
+ @dataclass
206
+ class MaskWeightsBuilder:
207
+ """Auxiliary class to compute a tensor of weights for a given diffusion region"""
208
+
209
+ latent_space_dim: int # Size of the U-net latent space
210
+ nbatch: int = 1 # Batch size in the U-net
211
+
212
+ def compute_mask_weights(self, region: DiffusionRegion) -> torch.tensor:
213
+ """Computes a tensor of weights for a given diffusion region"""
214
+ MASK_BUILDERS = {
215
+ MaskModes.CONSTANT.value: self._constant_weights,
216
+ MaskModes.GAUSSIAN.value: self._gaussian_weights,
217
+ MaskModes.QUARTIC.value: self._quartic_weights,
218
+ }
219
+ return MASK_BUILDERS[region.mask_type](region)
220
+
221
+ def _constant_weights(self, region: DiffusionRegion) -> torch.tensor:
222
+ """Computes a tensor of constant for a given diffusion region"""
223
+ latent_width = region.latent_col_end - region.latent_col_init
224
+ latent_height = region.latent_row_end - region.latent_row_init
225
+ return torch.ones(self.nbatch, self.latent_space_dim, latent_height, latent_width) * region.mask_weight
226
+
227
+ def _gaussian_weights(self, region: DiffusionRegion) -> torch.tensor:
228
+ """Generates a gaussian mask of weights for tile contributions"""
229
+ latent_width = region.latent_col_end - region.latent_col_init
230
+ latent_height = region.latent_row_end - region.latent_row_init
231
+
232
+ var = 0.01
233
+ midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
234
+ x_probs = [
235
+ exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
236
+ for x in range(latent_width)
237
+ ]
238
+ midpoint = (latent_height - 1) / 2
239
+ y_probs = [
240
+ exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
241
+ for y in range(latent_height)
242
+ ]
243
+
244
+ weights = np.outer(y_probs, x_probs) * region.mask_weight
245
+ return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
246
+
247
+ def _quartic_weights(self, region: DiffusionRegion) -> torch.tensor:
248
+ """Generates a quartic mask of weights for tile contributions
249
+
250
+ The quartic kernel has bounded support over the diffusion region, and a smooth decay to the region limits.
251
+ """
252
+ quartic_constant = 15.0 / 16.0
253
+
254
+ support = (np.array(range(region.latent_col_init, region.latent_col_end)) - region.latent_col_init) / (
255
+ region.latent_col_end - region.latent_col_init - 1
256
+ ) * 1.99 - (1.99 / 2.0)
257
+ x_probs = quartic_constant * np.square(1 - np.square(support))
258
+ support = (np.array(range(region.latent_row_init, region.latent_row_end)) - region.latent_row_init) / (
259
+ region.latent_row_end - region.latent_row_init - 1
260
+ ) * 1.99 - (1.99 / 2.0)
261
+ y_probs = quartic_constant * np.square(1 - np.square(support))
262
+
263
+ weights = np.outer(y_probs, x_probs) * region.mask_weight
264
+ return torch.tile(torch.tensor(weights), (self.nbatch, self.latent_space_dim, 1, 1))
265
+
266
+
267
+ class StableDiffusionCanvasPipeline(DiffusionPipeline):
268
+ """Stable Diffusion pipeline that mixes several diffusers in the same canvas"""
269
+
270
+ def __init__(
271
+ self,
272
+ vae: AutoencoderKL,
273
+ text_encoder: CLIPTextModel,
274
+ tokenizer: CLIPTokenizer,
275
+ unet: UNet2DConditionModel,
276
+ scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
277
+ safety_checker: StableDiffusionSafetyChecker,
278
+ feature_extractor: CLIPFeatureExtractor,
279
+ ):
280
+ super().__init__()
281
+ self.register_modules(
282
+ vae=vae,
283
+ text_encoder=text_encoder,
284
+ tokenizer=tokenizer,
285
+ unet=unet,
286
+ scheduler=scheduler,
287
+ safety_checker=safety_checker,
288
+ feature_extractor=feature_extractor,
289
+ )
290
+
291
+ def decode_latents(self, latents, cpu_vae=False):
292
+ """Decodes a given array of latents into pixel space"""
293
+ # scale and decode the image latents with vae
294
+ if cpu_vae:
295
+ lat = deepcopy(latents).cpu()
296
+ vae = deepcopy(self.vae).cpu()
297
+ else:
298
+ lat = latents
299
+ vae = self.vae
300
+
301
+ lat = 1 / 0.18215 * lat
302
+ image = vae.decode(lat).sample
303
+
304
+ image = (image / 2 + 0.5).clamp(0, 1)
305
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
306
+
307
+ return self.numpy_to_pil(image)
308
+
309
+ def get_latest_timestep_img2img(self, num_inference_steps, strength):
310
+ """Finds the latest timesteps where an img2img strength does not impose latents anymore"""
311
+ # get the original timestep using init_timestep
312
+ offset = self.scheduler.config.get("steps_offset", 0)
313
+ init_timestep = int(num_inference_steps * (1 - strength)) + offset
314
+ init_timestep = min(init_timestep, num_inference_steps)
315
+
316
+ t_start = min(max(num_inference_steps - init_timestep + offset, 0), num_inference_steps - 1)
317
+ latest_timestep = self.scheduler.timesteps[t_start]
318
+
319
+ return latest_timestep
320
+
321
+ @torch.no_grad()
322
+ def __call__(
323
+ self,
324
+ canvas_height: int,
325
+ canvas_width: int,
326
+ regions: List[DiffusionRegion],
327
+ num_inference_steps: Optional[int] = 50,
328
+ seed: Optional[int] = 12345,
329
+ reroll_regions: Optional[List[RerollRegion]] = None,
330
+ cpu_vae: Optional[bool] = False,
331
+ decode_steps: Optional[bool] = False,
332
+ ):
333
+ if reroll_regions is None:
334
+ reroll_regions = []
335
+ batch_size = 1
336
+
337
+ if decode_steps:
338
+ steps_images = []
339
+
340
+ # Prepare scheduler
341
+ self.scheduler.set_timesteps(num_inference_steps, device=self.device)
342
+
343
+ # Split diffusion regions by their kind
344
+ text2image_regions = [region for region in regions if isinstance(region, Text2ImageRegion)]
345
+ image2image_regions = [region for region in regions if isinstance(region, Image2ImageRegion)]
346
+
347
+ # Prepare text embeddings
348
+ for region in text2image_regions:
349
+ region.tokenize_prompt(self.tokenizer)
350
+ region.encode_prompt(self.text_encoder, self.device)
351
+
352
+ # Create original noisy latents using the timesteps
353
+ latents_shape = (batch_size, self.unet.config.in_channels, canvas_height // 8, canvas_width // 8)
354
+ generator = torch.Generator(self.device).manual_seed(seed)
355
+ init_noise = torch.randn(latents_shape, generator=generator, device=self.device)
356
+
357
+ # Reset latents in seed reroll regions, if requested
358
+ for region in reroll_regions:
359
+ if region.reroll_mode == RerollModes.RESET.value:
360
+ region_shape = (
361
+ latents_shape[0],
362
+ latents_shape[1],
363
+ region.latent_row_end - region.latent_row_init,
364
+ region.latent_col_end - region.latent_col_init,
365
+ )
366
+ init_noise[
367
+ :,
368
+ :,
369
+ region.latent_row_init : region.latent_row_end,
370
+ region.latent_col_init : region.latent_col_end,
371
+ ] = torch.randn(region_shape, generator=region.get_region_generator(self.device), device=self.device)
372
+
373
+ # Apply epsilon noise to regions: first diffusion regions, then reroll regions
374
+ all_eps_rerolls = regions + [r for r in reroll_regions if r.reroll_mode == RerollModes.EPSILON.value]
375
+ for region in all_eps_rerolls:
376
+ if region.noise_eps > 0:
377
+ region_noise = init_noise[
378
+ :,
379
+ :,
380
+ region.latent_row_init : region.latent_row_end,
381
+ region.latent_col_init : region.latent_col_end,
382
+ ]
383
+ eps_noise = (
384
+ torch.randn(
385
+ region_noise.shape, generator=region.get_region_generator(self.device), device=self.device
386
+ )
387
+ * region.noise_eps
388
+ )
389
+ init_noise[
390
+ :,
391
+ :,
392
+ region.latent_row_init : region.latent_row_end,
393
+ region.latent_col_init : region.latent_col_end,
394
+ ] += eps_noise
395
+
396
+ # scale the initial noise by the standard deviation required by the scheduler
397
+ latents = init_noise * self.scheduler.init_noise_sigma
398
+
399
+ # Get unconditional embeddings for classifier free guidance in text2image regions
400
+ for region in text2image_regions:
401
+ max_length = region.tokenized_prompt.input_ids.shape[-1]
402
+ uncond_input = self.tokenizer(
403
+ [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
404
+ )
405
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
406
+
407
+ # For classifier free guidance, we need to do two forward passes.
408
+ # Here we concatenate the unconditional and text embeddings into a single batch
409
+ # to avoid doing two forward passes
410
+ region.encoded_prompt = torch.cat([uncond_embeddings, region.encoded_prompt])
411
+
412
+ # Prepare image latents
413
+ for region in image2image_regions:
414
+ region.encode_reference_image(self.vae, device=self.device, generator=generator)
415
+
416
+ # Prepare mask of weights for each region
417
+ mask_builder = MaskWeightsBuilder(latent_space_dim=self.unet.config.in_channels, nbatch=batch_size)
418
+ mask_weights = [mask_builder.compute_mask_weights(region).to(self.device) for region in text2image_regions]
419
+
420
+ # Diffusion timesteps
421
+ for i, t in tqdm(enumerate(self.scheduler.timesteps)):
422
+ # Diffuse each region
423
+ noise_preds_regions = []
424
+
425
+ # text2image regions
426
+ for region in text2image_regions:
427
+ region_latents = latents[
428
+ :,
429
+ :,
430
+ region.latent_row_init : region.latent_row_end,
431
+ region.latent_col_init : region.latent_col_end,
432
+ ]
433
+ # expand the latents if we are doing classifier free guidance
434
+ latent_model_input = torch.cat([region_latents] * 2)
435
+ # scale model input following scheduler rules
436
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
437
+ # predict the noise residual
438
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=region.encoded_prompt)["sample"]
439
+ # perform guidance
440
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
441
+ noise_pred_region = noise_pred_uncond + region.guidance_scale * (noise_pred_text - noise_pred_uncond)
442
+ noise_preds_regions.append(noise_pred_region)
443
+
444
+ # Merge noise predictions for all tiles
445
+ noise_pred = torch.zeros(latents.shape, device=self.device)
446
+ contributors = torch.zeros(latents.shape, device=self.device)
447
+ # Add each tile contribution to overall latents
448
+ for region, noise_pred_region, mask_weights_region in zip(
449
+ text2image_regions, noise_preds_regions, mask_weights
450
+ ):
451
+ noise_pred[
452
+ :,
453
+ :,
454
+ region.latent_row_init : region.latent_row_end,
455
+ region.latent_col_init : region.latent_col_end,
456
+ ] += noise_pred_region * mask_weights_region
457
+ contributors[
458
+ :,
459
+ :,
460
+ region.latent_row_init : region.latent_row_end,
461
+ region.latent_col_init : region.latent_col_end,
462
+ ] += mask_weights_region
463
+ # Average overlapping areas with more than 1 contributor
464
+ noise_pred /= contributors
465
+ noise_pred = torch.nan_to_num(
466
+ noise_pred
467
+ ) # Replace NaNs by zeros: NaN can appear if a position is not covered by any DiffusionRegion
468
+
469
+ # compute the previous noisy sample x_t -> x_t-1
470
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
471
+
472
+ # Image2Image regions: override latents generated by the scheduler
473
+ for region in image2image_regions:
474
+ influence_step = self.get_latest_timestep_img2img(num_inference_steps, region.strength)
475
+ # Only override in the timesteps before the last influence step of the image (given by its strength)
476
+ if t > influence_step:
477
+ timestep = t.repeat(batch_size)
478
+ region_init_noise = init_noise[
479
+ :,
480
+ :,
481
+ region.latent_row_init : region.latent_row_end,
482
+ region.latent_col_init : region.latent_col_end,
483
+ ]
484
+ region_latents = self.scheduler.add_noise(region.reference_latents, region_init_noise, timestep)
485
+ latents[
486
+ :,
487
+ :,
488
+ region.latent_row_init : region.latent_row_end,
489
+ region.latent_col_init : region.latent_col_end,
490
+ ] = region_latents
491
+
492
+ if decode_steps:
493
+ steps_images.append(self.decode_latents(latents, cpu_vae))
494
+
495
+ # scale and decode the image latents with vae
496
+ image = self.decode_latents(latents, cpu_vae)
497
+
498
+ output = {"images": image}
499
+ if decode_steps:
500
+ output = {**output, "steps_images": steps_images}
501
+ return output
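For reference, a hedged usage sketch of the canvas pipeline defined above. The model id, scheduler settings, prompts, and region coordinates are illustrative assumptions, and the sketch presumes a local copy of this file is importable as `mixture_canvas` (so that `Text2ImageRegion` can be imported) while the pipeline itself is loaded with `custom_pipeline="mixture_canvas"`. Region coordinates must be multiples of 8.

```py
from diffusers import DiffusionPipeline, LMSDiscreteScheduler

from mixture_canvas import Text2ImageRegion  # assumes a local copy of this file is on the Python path

scheduler = LMSDiscreteScheduler(
    beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
)
pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_canvas"
).to("cuda")

# Two overlapping text-guided regions on a 512x1024 canvas (all coordinates are multiples of 8).
output = pipeline(
    canvas_height=512,
    canvas_width=1024,
    regions=[
        Text2ImageRegion(0, 512, 0, 640, guidance_scale=8, prompt="a mountain lake at sunset, highly detailed"),
        Text2ImageRegion(0, 512, 384, 1024, guidance_scale=8, prompt="a pine forest at sunset, highly detailed"),
    ],
    num_inference_steps=50,
    seed=1234,
)
output["images"][0].save("canvas.png")
```

In the overlapping strip (columns 384-640) the gaussian masks weight and average the two noise predictions, which is what produces the smooth blend between regions.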
v0.26.3/mixture_tiling.py ADDED
@@ -0,0 +1,405 @@
1
+ import inspect
2
+ from copy import deepcopy
3
+ from enum import Enum
4
+ from typing import List, Optional, Tuple, Union
5
+
6
+ import torch
7
+ from tqdm.auto import tqdm
8
+
9
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
10
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
11
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
12
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
13
+ from diffusers.utils import logging
14
+
15
+
16
+ try:
17
+ from ligo.segments import segment
18
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
19
+ except ImportError:
20
+ raise ImportError("Please install transformers and ligo-segments to use the mixture pipeline")
21
+
22
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
23
+
24
+ EXAMPLE_DOC_STRING = """
25
+ Examples:
26
+ ```py
27
+ >>> from diffusers import LMSDiscreteScheduler, DiffusionPipeline
28
+
29
+ >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
30
+ >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler, custom_pipeline="mixture_tiling")
31
+ >>> pipeline.to("cuda")
32
+
33
+ >>> image = pipeline(
34
+ >>> prompt=[[
35
+ >>> "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
36
+ >>> "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
37
+ >>> "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece"
38
+ >>> ]],
39
+ >>> tile_height=640,
40
+ >>> tile_width=640,
41
+ >>> tile_row_overlap=0,
42
+ >>> tile_col_overlap=256,
43
+ >>> guidance_scale=8,
44
+ >>> seed=7178915308,
45
+ >>> num_inference_steps=50,
46
+ >>> )["images"][0]
47
+ ```
48
+ """
49
+
50
+
51
+ def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
52
+ """Given a tile row and column numbers returns the range of pixels affected by that tiles in the overall image
53
+
54
+ Returns a tuple with:
55
+ - Starting coordinates of rows in pixel space
56
+ - Ending coordinates of rows in pixel space
57
+ - Starting coordinates of columns in pixel space
58
+ - Ending coordinates of columns in pixel space
59
+ """
60
+ px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
61
+ px_row_end = px_row_init + tile_height
62
+ px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
63
+ px_col_end = px_col_init + tile_width
64
+ return px_row_init, px_row_end, px_col_init, px_col_end
65
+
66
+
67
+ def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
68
+ """Translates coordinates in pixel space to coordinates in latent space"""
69
+ return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8
70
+
71
+
72
+ def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
73
+ """Given a tile row and column numbers returns the range of latents affected by that tiles in the overall image
74
+
75
+ Returns a tuple with:
76
+ - Starting coordinates of rows in latent space
77
+ - Ending coordinates of rows in latent space
78
+ - Starting coordinates of columns in latent space
79
+ - Ending coordinates of columns in latent space
80
+ """
81
+ px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
82
+ tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
83
+ )
84
+ return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
85
+
86
+
87
+ def _tile2latent_exclusive_indices(
88
+ tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
89
+ ):
90
+ """Given a tile row and column numbers returns the range of latents affected only by that tile in the overall image
91
+
92
+ Returns a tuple with:
93
+ - Starting coordinates of rows in latent space
94
+ - Ending coordinates of rows in latent space
95
+ - Starting coordinates of columns in latent space
96
+ - Ending coordinates of columns in latent space
97
+ """
98
+ row_init, row_end, col_init, col_end = _tile2latent_indices(
99
+ tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
100
+ )
101
+ row_segment = segment(row_init, row_end)
102
+ col_segment = segment(col_init, col_end)
103
+ # Iterate over the rest of tiles, clipping the region for the current tile
104
+ for row in range(rows):
105
+ for column in range(columns):
106
+ if row != tile_row and column != tile_col:
107
+ clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
108
+ row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
109
+ )
110
+ row_segment = row_segment - segment(clip_row_init, clip_row_end)
111
+ col_segment = col_segment - segment(clip_col_init, clip_col_end)
112
+ # return row_init, row_end, col_init, col_end
113
+ return row_segment[0], row_segment[1], col_segment[0], col_segment[1]
114
+
115
+
116
+ class StableDiffusionExtrasMixin:
117
+ """Mixin providing additional convenience method to Stable Diffusion pipelines"""
118
+
119
+ def decode_latents(self, latents, cpu_vae=False):
120
+ """Decodes a given array of latents into pixel space"""
121
+ # scale and decode the image latents with vae
122
+ if cpu_vae:
123
+ lat = deepcopy(latents).cpu()
124
+ vae = deepcopy(self.vae).cpu()
125
+ else:
126
+ lat = latents
127
+ vae = self.vae
128
+
129
+ lat = 1 / 0.18215 * lat
130
+ image = vae.decode(lat).sample
131
+
132
+ image = (image / 2 + 0.5).clamp(0, 1)
133
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
134
+
135
+ return self.numpy_to_pil(image)
136
+
137
+
138
+ class StableDiffusionTilingPipeline(DiffusionPipeline, StableDiffusionExtrasMixin):
139
+ def __init__(
140
+ self,
141
+ vae: AutoencoderKL,
142
+ text_encoder: CLIPTextModel,
143
+ tokenizer: CLIPTokenizer,
144
+ unet: UNet2DConditionModel,
145
+ scheduler: Union[DDIMScheduler, PNDMScheduler],
146
+ safety_checker: StableDiffusionSafetyChecker,
147
+ feature_extractor: CLIPFeatureExtractor,
148
+ ):
149
+ super().__init__()
150
+ self.register_modules(
151
+ vae=vae,
152
+ text_encoder=text_encoder,
153
+ tokenizer=tokenizer,
154
+ unet=unet,
155
+ scheduler=scheduler,
156
+ safety_checker=safety_checker,
157
+ feature_extractor=feature_extractor,
158
+ )
159
+
160
+ class SeedTilesMode(Enum):
161
+ """Modes in which the latents of a particular tile can be re-seeded"""
162
+
163
+ FULL = "full"
164
+ EXCLUSIVE = "exclusive"
165
+
166
+ @torch.no_grad()
167
+ def __call__(
168
+ self,
169
+ prompt: Union[str, List[List[str]]],
170
+ num_inference_steps: Optional[int] = 50,
171
+ guidance_scale: Optional[float] = 7.5,
172
+ eta: Optional[float] = 0.0,
173
+ seed: Optional[int] = None,
174
+ tile_height: Optional[int] = 512,
175
+ tile_width: Optional[int] = 512,
176
+ tile_row_overlap: Optional[int] = 256,
177
+ tile_col_overlap: Optional[int] = 256,
178
+ guidance_scale_tiles: Optional[List[List[float]]] = None,
179
+ seed_tiles: Optional[List[List[int]]] = None,
180
+ seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
181
+ seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
182
+ cpu_vae: Optional[bool] = False,
183
+ ):
184
+ r"""
185
+ Function to run the diffusion pipeline with tiling support.
186
+
187
+ Args:
188
+ prompt: a list of lists with all the prompts to use (one list for each row of tiles). This also defines the tiling structure.
188
+ num_inference_steps: number of diffusion steps.
189
+ guidance_scale: classifier-free guidance scale.
191
+ seed: general random seed to initialize latents.
192
+ tile_height: height in pixels of each grid tile.
193
+ tile_width: width in pixels of each grid tile.
194
+ tile_row_overlap: number of overlap pixels between tiles in consecutive rows.
195
+ tile_col_overlap: number of overlap pixels between tiles in consecutive columns.
196
+ guidance_scale_tiles: specific weights for classifier-free guidance in each tile. If None, the value provided in guidance_scale will be used.
198
+ seed_tiles: specific seeds for the initialization latents in each tile. These will override the latents generated for the whole canvas using the standard seed parameter.
199
+ seed_tiles_mode: either "full" "exclusive". If "full", all the latents affected by the tile be overriden. If "exclusive", only the latents that are affected exclusively by this tile (and no other tiles) will be overrriden.
200
+ seed_reroll_regions: a list of tuples in the form (start row, end row, start column, end column, seed) defining regions in pixel space for which the latents will be overriden using the given seed. Takes priority over seed_tiles.
201
+ cpu_vae: the decoder from latent space to pixel space can require too mucho GPU RAM for large images. If you find out of memory errors at the end of the generation process, try setting this parameter to True to run the decoder in CPU. Slower, but should run without memory issues.
202
+
203
+ Examples:
204
+
205
+ Returns:
206
+ A PIL image with the generated image.
207
+
208
+ """
209
+ if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
210
+ raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")
211
+ grid_rows = len(prompt)
212
+ grid_cols = len(prompt[0])
213
+ if not all(len(row) == grid_cols for row in prompt):
214
+ raise ValueError("All prompt rows must have the same number of prompt columns")
215
+ if not isinstance(seed_tiles_mode, str) and (
216
+ not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
217
+ ):
218
+ raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(prompt)}")
219
+ if isinstance(seed_tiles_mode, str):
220
+ seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]
221
+
222
+ modes = [mode.value for mode in self.SeedTilesMode]
223
+ if any(mode not in modes for row in seed_tiles_mode for mode in row):
224
+ raise ValueError(f"Seed tiles mode must be one of {modes}")
225
+ if seed_reroll_regions is None:
226
+ seed_reroll_regions = []
227
+ batch_size = 1
228
+
229
+ # create original noisy latents using the timesteps
230
+ height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
231
+ width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
232
+ latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
233
+ generator = torch.Generator("cuda").manual_seed(seed)
234
+ latents = torch.randn(latents_shape, generator=generator, device=self.device)
235
+
236
+ # overwrite latents for specific tiles if provided
237
+ if seed_tiles is not None:
238
+ for row in range(grid_rows):
239
+ for col in range(grid_cols):
240
+ if (seed_tile := seed_tiles[row][col]) is not None:
241
+ mode = seed_tiles_mode[row][col]
242
+ if mode == self.SeedTilesMode.FULL.value:
243
+ row_init, row_end, col_init, col_end = _tile2latent_indices(
244
+ row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
245
+ )
246
+ else:
247
+ row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
248
+ row,
249
+ col,
250
+ tile_width,
251
+ tile_height,
252
+ tile_row_overlap,
253
+ tile_col_overlap,
254
+ grid_rows,
255
+ grid_cols,
256
+ )
257
+ tile_generator = torch.Generator("cuda").manual_seed(seed_tile)
258
+ tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
259
+ latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
260
+ tile_shape, generator=tile_generator, device=self.device
261
+ )
262
+
263
+ # overwrite again for seed reroll regions
264
+ for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
265
+ row_init, row_end, col_init, col_end = _pixel2latent_indices(
266
+ row_init, row_end, col_init, col_end
267
+ ) # to latent space coordinates
268
+ reroll_generator = torch.Generator("cuda").manual_seed(seed_reroll)
269
+ region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
270
+ latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
271
+ region_shape, generator=reroll_generator, device=self.device
272
+ )
273
+
274
+ # Prepare scheduler
275
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
276
+ extra_set_kwargs = {}
277
+ if accepts_offset:
278
+ extra_set_kwargs["offset"] = 1
279
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
280
+ # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
281
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
282
+ latents = latents * self.scheduler.sigmas[0]
283
+
284
+ # get prompts text embeddings
285
+ text_input = [
286
+ [
287
+ self.tokenizer(
288
+ col,
289
+ padding="max_length",
290
+ max_length=self.tokenizer.model_max_length,
291
+ truncation=True,
292
+ return_tensors="pt",
293
+ )
294
+ for col in row
295
+ ]
296
+ for row in prompt
297
+ ]
298
+ text_embeddings = [[self.text_encoder(col.input_ids.to(self.device))[0] for col in row] for row in text_input]
299
+
300
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
301
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
302
+ # corresponds to doing no classifier free guidance.
303
+ do_classifier_free_guidance = guidance_scale > 1.0 # TODO: also active if any tile has guidance scale
304
+ # get unconditional embeddings for classifier free guidance
305
+ if do_classifier_free_guidance:
306
+ for i in range(grid_rows):
307
+ for j in range(grid_cols):
308
+ max_length = text_input[i][j].input_ids.shape[-1]
309
+ uncond_input = self.tokenizer(
310
+ [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
311
+ )
312
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
313
+
314
+ # For classifier free guidance, we need to do two forward passes.
315
+ # Here we concatenate the unconditional and text embeddings into a single batch
316
+ # to avoid doing two forward passes
317
+ text_embeddings[i][j] = torch.cat([uncond_embeddings, text_embeddings[i][j]])
318
+
319
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
320
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
321
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
322
+ # and should be between [0, 1]
323
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
324
+ extra_step_kwargs = {}
325
+ if accepts_eta:
326
+ extra_step_kwargs["eta"] = eta
327
+
328
+ # Mask for tile weight strength
329
+ tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size)
330
+
331
+ # Diffusion timesteps
332
+ for i, t in tqdm(enumerate(self.scheduler.timesteps)):
333
+ # Diffuse each tile
334
+ noise_preds = []
335
+ for row in range(grid_rows):
336
+ noise_preds_row = []
337
+ for col in range(grid_cols):
338
+ px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
339
+ row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
340
+ )
341
+ tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
342
+ # expand the latents if we are doing classifier free guidance
343
+ latent_model_input = torch.cat([tile_latents] * 2) if do_classifier_free_guidance else tile_latents
344
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
345
+ # predict the noise residual
346
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings[row][col])[
347
+ "sample"
348
+ ]
349
+ # perform guidance
350
+ if do_classifier_free_guidance:
351
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
352
+ guidance = (
353
+ guidance_scale
354
+ if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
355
+ else guidance_scale_tiles[row][col]
356
+ )
357
+ noise_pred_tile = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
358
+ noise_preds_row.append(noise_pred_tile)
359
+ noise_preds.append(noise_preds_row)
360
+ # Stitch noise predictions for all tiles
361
+ noise_pred = torch.zeros(latents.shape, device=self.device)
362
+ contributors = torch.zeros(latents.shape, device=self.device)
363
+ # Add each tile contribution to overall latents
364
+ for row in range(grid_rows):
365
+ for col in range(grid_cols):
366
+ px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
367
+ row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
368
+ )
369
+ noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
370
+ noise_preds[row][col] * tile_weights
371
+ )
372
+ contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights
373
+ # Average overlapping areas with more than 1 contributor
374
+ noise_pred /= contributors
375
+
376
+ # compute the previous noisy sample x_t -> x_t-1
377
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
378
+
379
+ # scale and decode the image latents with vae
380
+ image = self.decode_latents(latents, cpu_vae)
381
+
382
+ return {"images": image}
383
+
384
+ def _gaussian_weights(self, tile_width, tile_height, nbatches):
385
+ """Generates a gaussian mask of weights for tile contributions"""
386
+ import numpy as np
387
+ from numpy import exp, pi, sqrt
388
+
389
+ latent_width = tile_width // 8
390
+ latent_height = tile_height // 8
391
+
392
+ var = 0.01
393
+ midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1
394
+ x_probs = [
395
+ exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
396
+ for x in range(latent_width)
397
+ ]
398
+ midpoint = latent_height / 2
399
+ y_probs = [
400
+ exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
401
+ for y in range(latent_height)
402
+ ]
403
+
404
+ weights = np.outer(y_probs, x_probs)
405
+ return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))
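The canvas size this tiling pipeline generates follows directly from the grid shape, tile size, and overlaps (see the `height`/`width` computation at the top of `__call__`). A quick worked check for the 1x3 grid used in the example docstring above:

```py
# Same formula as in __call__; the 1x3 grid of 640px tiles with a 256px column overlap
# is taken from the EXAMPLE_DOC_STRING of this file.
grid_rows, grid_cols = 1, 3
tile_height, tile_width = 640, 640
tile_row_overlap, tile_col_overlap = 0, 256

height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
print(height, width)  # 640 1408
```

So the three 640x640 tiles with 256 pixels of horizontal overlap produce a single 640x1408 panorama.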
v0.26.3/multilingual_stable_diffusion.py ADDED
@@ -0,0 +1,437 @@
1
+ import inspect
2
+ from typing import Callable, List, Optional, Union
3
+
4
+ import torch
5
+ from transformers import (
6
+ CLIPImageProcessor,
7
+ CLIPTextModel,
8
+ CLIPTokenizer,
9
+ MBart50TokenizerFast,
10
+ MBartForConditionalGeneration,
11
+ pipeline,
12
+ )
13
+
14
+ from diffusers import DiffusionPipeline
15
+ from diffusers.configuration_utils import FrozenDict
16
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
17
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
18
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
19
+ from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
20
+ from diffusers.utils import deprecate, logging
21
+
22
+
23
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
24
+
25
+
26
+ def detect_language(pipe, prompt, batch_size):
27
+ """helper function to detect language(s) of prompt"""
28
+
29
+ if batch_size == 1:
30
+ preds = pipe(prompt, top_k=1, truncation=True, max_length=128)
31
+ return preds[0]["label"]
32
+ else:
33
+ detected_languages = []
34
+ for p in prompt:
35
+ preds = pipe(p, top_k=1, truncation=True, max_length=128)
36
+ detected_languages.append(preds[0]["label"])
37
+
38
+ return detected_languages
39
+
40
+
41
+ def translate_prompt(prompt, translation_tokenizer, translation_model, device):
42
+ """helper function to translate prompt to English"""
43
+
44
+ encoded_prompt = translation_tokenizer(prompt, return_tensors="pt").to(device)
45
+ generated_tokens = translation_model.generate(**encoded_prompt, max_new_tokens=1000)
46
+ en_trans = translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
47
+
48
+ return en_trans[0]
49
+
50
+
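A hedged sketch of how the two helpers above are typically wired together before the pipeline is constructed. The detection and translation checkpoints named below are illustrative assumptions only (any language-identification text classifier and any many-to-English MBart-50 checkpoint with its matching tokenizer should fit), and `detect_language` / `translate_prompt` are the functions defined just above.

```py
import torch
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical checkpoint choices, used only to illustrate the plumbing.
detector = pipeline(
    "text-classification",
    model="papluca/xlm-roberta-base-language-detection",
    device=0 if device == "cuda" else -1,
)
trans_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
trans_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt").to(device)

prompt = "Una casa en la playa"
if detect_language(detector, prompt, batch_size=1) != "en":
    prompt = translate_prompt(prompt, trans_tokenizer, trans_model, device)
print(prompt)  # the English text that will be fed to the CLIP text encoder
```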
51
+ class MultilingualStableDiffusion(DiffusionPipeline):
52
+ r"""
53
+ Pipeline for text-to-image generation using Stable Diffusion in different languages.
54
+
55
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
56
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
57
+
58
+ Args:
59
+ detection_pipeline ([`pipeline`]):
60
+ Transformers pipeline to detect prompt's language.
61
+ translation_model ([`MBartForConditionalGeneration`]):
62
+ Model to translate prompt to English, if necessary. Please refer to the
63
+ [model card](https://huggingface.co/docs/transformers/model_doc/mbart) for details.
64
+ translation_tokenizer ([`MBart50TokenizerFast`]):
65
+ Tokenizer of the translation model.
66
+ vae ([`AutoencoderKL`]):
67
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
68
+ text_encoder ([`CLIPTextModel`]):
69
+ Frozen text-encoder. Stable Diffusion uses the text portion of
70
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
71
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
72
+ tokenizer (`CLIPTokenizer`):
73
+ Tokenizer of class
74
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
75
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
76
+ scheduler ([`SchedulerMixin`]):
77
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
78
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
79
+ safety_checker ([`StableDiffusionSafetyChecker`]):
80
+ Classification module that estimates whether generated images could be considered offensive or harmful.
81
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
82
+ feature_extractor ([`CLIPImageProcessor`]):
83
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ detection_pipeline: pipeline,
89
+ translation_model: MBartForConditionalGeneration,
90
+ translation_tokenizer: MBart50TokenizerFast,
91
+ vae: AutoencoderKL,
92
+ text_encoder: CLIPTextModel,
93
+ tokenizer: CLIPTokenizer,
94
+ unet: UNet2DConditionModel,
95
+ scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
96
+ safety_checker: StableDiffusionSafetyChecker,
97
+ feature_extractor: CLIPImageProcessor,
98
+ ):
99
+ super().__init__()
100
+
101
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
102
+ deprecation_message = (
103
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
104
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
105
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
106
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
107
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
108
+ " file"
109
+ )
110
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
111
+ new_config = dict(scheduler.config)
112
+ new_config["steps_offset"] = 1
113
+ scheduler._internal_dict = FrozenDict(new_config)
114
+
115
+ if safety_checker is None:
116
+ logger.warning(
117
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
118
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
119
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
120
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
121
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
122
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
123
+ )
124
+
125
+ self.register_modules(
126
+ detection_pipeline=detection_pipeline,
127
+ translation_model=translation_model,
128
+ translation_tokenizer=translation_tokenizer,
129
+ vae=vae,
130
+ text_encoder=text_encoder,
131
+ tokenizer=tokenizer,
132
+ unet=unet,
133
+ scheduler=scheduler,
134
+ safety_checker=safety_checker,
135
+ feature_extractor=feature_extractor,
136
+ )
137
+
138
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
139
+ r"""
140
+ Enable sliced attention computation.
141
+
142
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
143
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
144
+
145
+ Args:
146
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
147
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
148
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
149
+ `attention_head_dim` must be a multiple of `slice_size`.
150
+ """
151
+ if slice_size == "auto":
152
+ # half the attention head size is usually a good trade-off between
153
+ # speed and memory
154
+ slice_size = self.unet.config.attention_head_dim // 2
155
+ self.unet.set_attention_slice(slice_size)
156
+
157
+ def disable_attention_slicing(self):
158
+ r"""
159
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
160
+ back to computing attention in one step.
161
+ """
162
+ # set slice_size = `None` to disable `attention slicing`
163
+ self.enable_attention_slicing(None)
164
+
165
+ @torch.no_grad()
166
+ def __call__(
167
+ self,
168
+ prompt: Union[str, List[str]],
169
+ height: int = 512,
170
+ width: int = 512,
171
+ num_inference_steps: int = 50,
172
+ guidance_scale: float = 7.5,
173
+ negative_prompt: Optional[Union[str, List[str]]] = None,
174
+ num_images_per_prompt: Optional[int] = 1,
175
+ eta: float = 0.0,
176
+ generator: Optional[torch.Generator] = None,
177
+ latents: Optional[torch.FloatTensor] = None,
178
+ output_type: Optional[str] = "pil",
179
+ return_dict: bool = True,
180
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
181
+ callback_steps: int = 1,
182
+ **kwargs,
183
+ ):
184
+ r"""
185
+ Function invoked when calling the pipeline for generation.
186
+
187
+ Args:
188
+ prompt (`str` or `List[str]`):
189
+ The prompt or prompts to guide the image generation. Can be in different languages.
190
+ height (`int`, *optional*, defaults to 512):
191
+ The height in pixels of the generated image.
192
+ width (`int`, *optional*, defaults to 512):
193
+ The width in pixels of the generated image.
194
+ num_inference_steps (`int`, *optional*, defaults to 50):
195
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
196
+ expense of slower inference.
197
+ guidance_scale (`float`, *optional*, defaults to 7.5):
198
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
199
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
200
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
201
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
202
+ usually at the expense of lower image quality.
203
+ negative_prompt (`str` or `List[str]`, *optional*):
204
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
205
+ if `guidance_scale` is less than `1`).
206
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
207
+ The number of images to generate per prompt.
208
+ eta (`float`, *optional*, defaults to 0.0):
209
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
210
+ [`schedulers.DDIMScheduler`], will be ignored for others.
211
+ generator (`torch.Generator`, *optional*):
212
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
213
+ deterministic.
214
+ latents (`torch.FloatTensor`, *optional*):
215
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
216
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
217
+ tensor will be generated by sampling using the supplied random `generator`.
218
+ output_type (`str`, *optional*, defaults to `"pil"`):
219
+ The output format of the generated image. Choose between
220
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
221
+ return_dict (`bool`, *optional*, defaults to `True`):
222
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
223
+ plain tuple.
224
+ callback (`Callable`, *optional*):
225
+ A function that will be called every `callback_steps` steps during inference. The function will be
226
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
227
+ callback_steps (`int`, *optional*, defaults to 1):
228
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
229
+ called at every step.
230
+
231
+ Returns:
232
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
233
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
234
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
235
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
236
+ (nsfw) content, according to the `safety_checker`.
237
+ """
238
+ if isinstance(prompt, str):
239
+ batch_size = 1
240
+ elif isinstance(prompt, list):
241
+ batch_size = len(prompt)
242
+ else:
243
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
244
+
245
+ if height % 8 != 0 or width % 8 != 0:
246
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
247
+
248
+ if (callback_steps is None) or (
249
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
250
+ ):
251
+ raise ValueError(
252
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
253
+ f" {type(callback_steps)}."
254
+ )
255
+
256
+ # detect language and translate if necessary
257
+ prompt_language = detect_language(self.detection_pipeline, prompt, batch_size)
258
+ if batch_size == 1 and prompt_language != "en":
259
+ prompt = translate_prompt(prompt, self.translation_tokenizer, self.translation_model, self.device)
260
+
261
+ if isinstance(prompt, list):
262
+ for index in range(batch_size):
263
+ if prompt_language[index] != "en":
264
+ p = translate_prompt(
265
+ prompt[index], self.translation_tokenizer, self.translation_model, self.device
266
+ )
267
+ prompt[index] = p
268
+
269
+ # get prompt text embeddings
270
+ text_inputs = self.tokenizer(
271
+ prompt,
272
+ padding="max_length",
273
+ max_length=self.tokenizer.model_max_length,
274
+ return_tensors="pt",
275
+ )
276
+ text_input_ids = text_inputs.input_ids
277
+
278
+ if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
279
+ removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
280
+ logger.warning(
281
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
282
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
283
+ )
284
+ text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
285
+ text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
286
+
287
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
288
+ bs_embed, seq_len, _ = text_embeddings.shape
289
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
290
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
291
+
292
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
293
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
294
+ # corresponds to doing no classifier free guidance.
295
+ do_classifier_free_guidance = guidance_scale > 1.0
296
+ # get unconditional embeddings for classifier free guidance
297
+ if do_classifier_free_guidance:
298
+ uncond_tokens: List[str]
299
+ if negative_prompt is None:
300
+ uncond_tokens = [""] * batch_size
301
+ elif type(prompt) is not type(negative_prompt):
302
+ raise TypeError(
303
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
304
+ f" {type(prompt)}."
305
+ )
306
+ elif isinstance(negative_prompt, str):
307
+ # detect language and translate it if necessary
308
+ negative_prompt_language = detect_language(self.detection_pipeline, negative_prompt, batch_size)
309
+ if negative_prompt_language != "en":
310
+ negative_prompt = translate_prompt(
311
+ negative_prompt, self.translation_tokenizer, self.translation_model, self.device
312
+ )
313
+ if isinstance(negative_prompt, str):
314
+ uncond_tokens = [negative_prompt]
315
+ elif batch_size != len(negative_prompt):
316
+ raise ValueError(
317
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
318
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
319
+ " the batch size of `prompt`."
320
+ )
321
+ else:
322
+ # detect language and translate it if necessary
323
+ if isinstance(negative_prompt, list):
324
+ negative_prompt_languages = detect_language(self.detection_pipeline, negative_prompt, batch_size)
325
+ for index in range(batch_size):
326
+ if negative_prompt_languages[index] != "en":
327
+ p = translate_prompt(
328
+ negative_prompt[index], self.translation_tokenizer, self.translation_model, self.device
329
+ )
330
+ negative_prompt[index] = p
331
+ uncond_tokens = negative_prompt
332
+
333
+ max_length = text_input_ids.shape[-1]
334
+ uncond_input = self.tokenizer(
335
+ uncond_tokens,
336
+ padding="max_length",
337
+ max_length=max_length,
338
+ truncation=True,
339
+ return_tensors="pt",
340
+ )
341
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
342
+
343
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
344
+ seq_len = uncond_embeddings.shape[1]
345
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
346
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
347
+
348
+ # For classifier free guidance, we need to do two forward passes.
349
+ # Here we concatenate the unconditional and text embeddings into a single batch
350
+ # to avoid doing two forward passes
351
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
352
+
353
+ # get the initial random noise unless the user supplied it
354
+
355
+ # Unlike in other pipelines, latents need to be generated in the target device
356
+ # for 1-to-1 results reproducibility with the CompVis implementation.
357
+ # However this currently doesn't work in `mps`.
358
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
359
+ latents_dtype = text_embeddings.dtype
360
+ if latents is None:
361
+ if self.device.type == "mps":
362
+ # randn does not work reproducibly on mps
363
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
364
+ self.device
365
+ )
366
+ else:
367
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
368
+ else:
369
+ if latents.shape != latents_shape:
370
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
371
+ latents = latents.to(self.device)
372
+
373
+ # set timesteps
374
+ self.scheduler.set_timesteps(num_inference_steps)
375
+
376
+ # Some schedulers like PNDM have timesteps as arrays
377
+ # It's more optimized to move all timesteps to correct device beforehand
378
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
379
+
380
+ # scale the initial noise by the standard deviation required by the scheduler
381
+ latents = latents * self.scheduler.init_noise_sigma
382
+
383
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
384
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
385
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
386
+ # and should be between [0, 1]
387
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
388
+ extra_step_kwargs = {}
389
+ if accepts_eta:
390
+ extra_step_kwargs["eta"] = eta
391
+
392
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
393
+ # expand the latents if we are doing classifier free guidance
394
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
395
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
396
+
397
+ # predict the noise residual
398
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
399
+
400
+ # perform guidance
401
+ if do_classifier_free_guidance:
402
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
403
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
404
+
405
+ # compute the previous noisy sample x_t -> x_t-1
406
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
407
+
408
+ # call the callback, if provided
409
+ if callback is not None and i % callback_steps == 0:
410
+ step_idx = i // getattr(self.scheduler, "order", 1)
411
+ callback(step_idx, t, latents)
412
+
413
+ latents = 1 / 0.18215 * latents
414
+ image = self.vae.decode(latents).sample
415
+
416
+ image = (image / 2 + 0.5).clamp(0, 1)
417
+
418
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
419
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
420
+
421
+ if self.safety_checker is not None:
422
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
423
+ self.device
424
+ )
425
+ image, has_nsfw_concept = self.safety_checker(
426
+ images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
427
+ )
428
+ else:
429
+ has_nsfw_concept = None
430
+
431
+ if output_type == "pil":
432
+ image = self.numpy_to_pil(image)
433
+
434
+ if not return_dict:
435
+ return (image, has_nsfw_concept)
436
+
437
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
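For orientation, the snippet below is a minimal usage sketch of this multilingual pipeline: non-English prompts (and negative prompts) are detected and translated to English before the usual Stable Diffusion text encoding shown above. The language-detection and translation components are passed in explicitly; the specific checkpoints here (an XLM-RoBERTa language detector and an MBart-50 translator) and the base model ID are illustrative assumptions, not requirements of the pipeline.

```py
import torch
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline
from diffusers import DiffusionPipeline

# Assumed component choices; any detector/translator exposing the same interfaces should work.
detector = pipeline("text-classification", model="papluca/xlm-roberta-base-language-detection")
trans_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
trans_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # example base checkpoint
    custom_pipeline="multilingual_stable_diffusion",
    detection_pipeline=detector,
    translation_model=trans_model,
    translation_tokenizer=trans_tokenizer,
    torch_dtype=torch.float16,
).to("cuda")

# A Spanish prompt; it is translated to English before CLIP encoding.
image = pipe("Una casa en la playa", num_inference_steps=50).images[0]
image.save("casa_en_la_playa.png")
```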
v0.26.3/one_step_unet.py ADDED
@@ -0,0 +1,24 @@
+ #!/usr/bin/env python3
+ import torch
+
+ from diffusers import DiffusionPipeline
+
+
+ class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
+     def __init__(self, unet, scheduler):
+         super().__init__()
+
+         self.register_modules(unet=unet, scheduler=scheduler)
+
+     def __call__(self):
+         image = torch.randn(
+             (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
+         )
+         timestep = 1
+
+         model_output = self.unet(image, timestep).sample
+         scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
+
+         result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
+
+         return result
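This pipeline is essentially a smoke test: one UNet forward pass, one scheduler step, and by construction a tensor of ones is returned. A quick sketch of how it could be exercised with a small randomly initialized UNet and a DDPM scheduler (the UNet config values are arbitrary and chosen only to keep the example light):

```py
import torch
from diffusers import DDPMScheduler, UNet2DModel

# Arbitrary small UNet config, purely for illustration.
unet = UNet2DModel(
    sample_size=8,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
scheduler = DDPMScheduler()

pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
out = pipe()  # shape (1, 3, 8, 8); every element is 1.0 by construction
print(out.shape, out.unique())
```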
v0.26.3/pipeline_animatediff_controlnet.py ADDED
@@ -0,0 +1,1130 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from dataclasses import dataclass
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from PIL import Image
23
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
24
+
25
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
26
+ from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
27
+ from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel, UNetMotionModel
28
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
29
+ from diffusers.models.unets.unet_motion_model import MotionAdapter
30
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
31
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
32
+ from diffusers.schedulers import (
33
+ DDIMScheduler,
34
+ DPMSolverMultistepScheduler,
35
+ EulerAncestralDiscreteScheduler,
36
+ EulerDiscreteScheduler,
37
+ LMSDiscreteScheduler,
38
+ PNDMScheduler,
39
+ )
40
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
41
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+ EXAMPLE_DOC_STRING = """
47
+ Examples:
48
+ ```py
49
+ >>> import torch
50
+ >>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter
51
+ >>> from diffusers.pipelines import DiffusionPipeline
52
+ >>> from diffusers.schedulers import DPMSolverMultistepScheduler
53
+ >>> from PIL import Image
54
+
55
+ >>> motion_id = "guoyww/animatediff-motion-adapter-v1-5-2"
56
+ >>> adapter = MotionAdapter.from_pretrained(motion_id)
57
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
58
+ >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
59
+
60
+ >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
61
+ >>> pipe = DiffusionPipeline.from_pretrained(
62
+ ... model_id,
63
+ ... motion_adapter=adapter,
64
+ ... controlnet=controlnet,
65
+ ... vae=vae,
66
+ ... custom_pipeline="pipeline_animatediff_controlnet",
67
+ ... ).to(device="cuda", dtype=torch.float16)
68
+ >>> pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained(
69
+ ... model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1, beta_schedule="linear",
70
+ ... )
71
+ >>> pipe.enable_vae_slicing()
72
+
73
+ >>> conditioning_frames = []
74
+ >>> for i in range(1, 16 + 1):
75
+ ... conditioning_frames.append(Image.open(f"frame_{i}.png"))
76
+
77
+ >>> prompt = "astronaut in space, dancing"
78
+ >>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
79
+ >>> result = pipe(
80
+ ... prompt=prompt,
81
+ ... negative_prompt=negative_prompt,
82
+ ... width=512,
83
+ ... height=768,
84
+ ... conditioning_frames=conditioning_frames,
85
+ ... num_inference_steps=12,
86
+ ... )
87
+
88
+ >>> from diffusers.utils import export_to_gif
89
+ >>> export_to_gif(result.frames[0], "result.gif")
90
+ ```
91
+ """
92
+
93
+
94
+ def tensor2vid(video: torch.Tensor, processor, output_type="np"):
95
+ # Based on:
96
+ # https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78
97
+
98
+ batch_size, channels, num_frames, height, width = video.shape
99
+ outputs = []
100
+ for batch_idx in range(batch_size):
101
+ batch_vid = video[batch_idx].permute(1, 0, 2, 3)
102
+ batch_output = processor.postprocess(batch_vid, output_type)
103
+
104
+ outputs.append(batch_output)
105
+
106
+ return outputs
107
+
108
+
109
+ @dataclass
110
+ class AnimateDiffControlNetPipelineOutput(BaseOutput):
111
+ frames: Union[torch.Tensor, np.ndarray]
112
+
113
+
114
+ class AnimateDiffControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin):
115
+ r"""
116
+ Pipeline for text-to-video generation.
117
+
118
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
119
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
120
+
121
+ The pipeline also inherits the following loading methods:
122
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
123
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
124
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
125
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
126
+
127
+ Args:
128
+ vae ([`AutoencoderKL`]):
129
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
130
+ text_encoder ([`CLIPTextModel`]):
131
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
132
+ tokenizer (`CLIPTokenizer`):
133
+ A [`~transformers.CLIPTokenizer`] to tokenize text.
134
+ unet ([`UNet2DConditionModel`]):
135
+ A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
136
+ motion_adapter ([`MotionAdapter`]):
137
+ A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
138
+ scheduler ([`SchedulerMixin`]):
139
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
140
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
141
+ """
142
+
143
+ model_cpu_offload_seq = "text_encoder->unet->vae"
144
+ _optional_components = ["feature_extractor", "image_encoder"]
145
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
146
+
147
+ def __init__(
148
+ self,
149
+ vae: AutoencoderKL,
150
+ text_encoder: CLIPTextModel,
151
+ tokenizer: CLIPTokenizer,
152
+ unet: UNet2DConditionModel,
153
+ motion_adapter: MotionAdapter,
154
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
155
+ scheduler: Union[
156
+ DDIMScheduler,
157
+ PNDMScheduler,
158
+ LMSDiscreteScheduler,
159
+ EulerDiscreteScheduler,
160
+ EulerAncestralDiscreteScheduler,
161
+ DPMSolverMultistepScheduler,
162
+ ],
163
+ feature_extractor: Optional[CLIPImageProcessor] = None,
164
+ image_encoder: Optional[CLIPVisionModelWithProjection] = None,
165
+ ):
166
+ super().__init__()
167
+ unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
168
+
169
+ if isinstance(controlnet, (list, tuple)):
170
+ controlnet = MultiControlNetModel(controlnet)
171
+
172
+ self.register_modules(
173
+ vae=vae,
174
+ text_encoder=text_encoder,
175
+ tokenizer=tokenizer,
176
+ unet=unet,
177
+ motion_adapter=motion_adapter,
178
+ controlnet=controlnet,
179
+ scheduler=scheduler,
180
+ feature_extractor=feature_extractor,
181
+ image_encoder=image_encoder,
182
+ )
183
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
184
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
185
+ self.control_image_processor = VaeImageProcessor(
186
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
187
+ )
188
+
189
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
190
+ def encode_prompt(
191
+ self,
192
+ prompt,
193
+ device,
194
+ num_images_per_prompt,
195
+ do_classifier_free_guidance,
196
+ negative_prompt=None,
197
+ prompt_embeds: Optional[torch.FloatTensor] = None,
198
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
199
+ lora_scale: Optional[float] = None,
200
+ clip_skip: Optional[int] = None,
201
+ ):
202
+ r"""
203
+ Encodes the prompt into text encoder hidden states.
204
+
205
+ Args:
206
+ prompt (`str` or `List[str]`, *optional*):
207
+ prompt to be encoded
208
+ device: (`torch.device`):
209
+ torch device
210
+ num_images_per_prompt (`int`):
211
+ number of images that should be generated per prompt
212
+ do_classifier_free_guidance (`bool`):
213
+ whether to use classifier free guidance or not
214
+ negative_prompt (`str` or `List[str]`, *optional*):
215
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
216
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
217
+ less than `1`).
218
+ prompt_embeds (`torch.FloatTensor`, *optional*):
219
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
220
+ provided, text embeddings will be generated from `prompt` input argument.
221
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
222
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
223
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
224
+ argument.
225
+ lora_scale (`float`, *optional*):
226
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
227
+ clip_skip (`int`, *optional*):
228
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
229
+ the output of the pre-final layer will be used for computing the prompt embeddings.
230
+ """
231
+ # set lora scale so that monkey patched LoRA
232
+ # function of text encoder can correctly access it
233
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
234
+ self._lora_scale = lora_scale
235
+
236
+ # dynamically adjust the LoRA scale
237
+ if not USE_PEFT_BACKEND:
238
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
239
+ else:
240
+ scale_lora_layers(self.text_encoder, lora_scale)
241
+
242
+ if prompt is not None and isinstance(prompt, str):
243
+ batch_size = 1
244
+ elif prompt is not None and isinstance(prompt, list):
245
+ batch_size = len(prompt)
246
+ else:
247
+ batch_size = prompt_embeds.shape[0]
248
+
249
+ if prompt_embeds is None:
250
+ # textual inversion: process multi-vector tokens if necessary
251
+ if isinstance(self, TextualInversionLoaderMixin):
252
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
253
+
254
+ text_inputs = self.tokenizer(
255
+ prompt,
256
+ padding="max_length",
257
+ max_length=self.tokenizer.model_max_length,
258
+ truncation=True,
259
+ return_tensors="pt",
260
+ )
261
+ text_input_ids = text_inputs.input_ids
262
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
263
+
264
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
265
+ text_input_ids, untruncated_ids
266
+ ):
267
+ removed_text = self.tokenizer.batch_decode(
268
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
269
+ )
270
+ logger.warning(
271
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
272
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
273
+ )
274
+
275
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
276
+ attention_mask = text_inputs.attention_mask.to(device)
277
+ else:
278
+ attention_mask = None
279
+
280
+ if clip_skip is None:
281
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
282
+ prompt_embeds = prompt_embeds[0]
283
+ else:
284
+ prompt_embeds = self.text_encoder(
285
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
286
+ )
287
+ # Access the `hidden_states` first, that contains a tuple of
288
+ # all the hidden states from the encoder layers. Then index into
289
+ # the tuple to access the hidden states from the desired layer.
290
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
291
+ # We also need to apply the final LayerNorm here to not mess with the
292
+ # representations. The `last_hidden_states` that we typically use for
293
+ # obtaining the final prompt representations passes through the LayerNorm
294
+ # layer.
295
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
296
+
297
+ if self.text_encoder is not None:
298
+ prompt_embeds_dtype = self.text_encoder.dtype
299
+ elif self.unet is not None:
300
+ prompt_embeds_dtype = self.unet.dtype
301
+ else:
302
+ prompt_embeds_dtype = prompt_embeds.dtype
303
+
304
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
305
+
306
+ bs_embed, seq_len, _ = prompt_embeds.shape
307
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
308
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
309
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
310
+
311
+ # get unconditional embeddings for classifier free guidance
312
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
313
+ uncond_tokens: List[str]
314
+ if negative_prompt is None:
315
+ uncond_tokens = [""] * batch_size
316
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
317
+ raise TypeError(
318
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
319
+ f" {type(prompt)}."
320
+ )
321
+ elif isinstance(negative_prompt, str):
322
+ uncond_tokens = [negative_prompt]
323
+ elif batch_size != len(negative_prompt):
324
+ raise ValueError(
325
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
326
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
327
+ " the batch size of `prompt`."
328
+ )
329
+ else:
330
+ uncond_tokens = negative_prompt
331
+
332
+ # textual inversion: process multi-vector tokens if necessary
333
+ if isinstance(self, TextualInversionLoaderMixin):
334
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
335
+
336
+ max_length = prompt_embeds.shape[1]
337
+ uncond_input = self.tokenizer(
338
+ uncond_tokens,
339
+ padding="max_length",
340
+ max_length=max_length,
341
+ truncation=True,
342
+ return_tensors="pt",
343
+ )
344
+
345
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
346
+ attention_mask = uncond_input.attention_mask.to(device)
347
+ else:
348
+ attention_mask = None
349
+
350
+ negative_prompt_embeds = self.text_encoder(
351
+ uncond_input.input_ids.to(device),
352
+ attention_mask=attention_mask,
353
+ )
354
+ negative_prompt_embeds = negative_prompt_embeds[0]
355
+
356
+ if do_classifier_free_guidance:
357
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
358
+ seq_len = negative_prompt_embeds.shape[1]
359
+
360
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
361
+
362
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
363
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
364
+
365
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
366
+ # Retrieve the original scale by scaling back the LoRA layers
367
+ unscale_lora_layers(self.text_encoder, lora_scale)
368
+
369
+ return prompt_embeds, negative_prompt_embeds
370
+
371
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
372
+ def encode_image(self, image, device, num_images_per_prompt):
373
+ dtype = next(self.image_encoder.parameters()).dtype
374
+
375
+ if not isinstance(image, torch.Tensor):
376
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
377
+
378
+ image = image.to(device=device, dtype=dtype)
379
+ image_embeds = self.image_encoder(image).image_embeds
380
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
381
+
382
+ uncond_image_embeds = torch.zeros_like(image_embeds)
383
+ return image_embeds, uncond_image_embeds
384
+
385
+ # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
386
+ def decode_latents(self, latents):
387
+ latents = 1 / self.vae.config.scaling_factor * latents
388
+
389
+ batch_size, channels, num_frames, height, width = latents.shape
390
+ latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
391
+
392
+ image = self.vae.decode(latents).sample
393
+ video = (
394
+ image[None, :]
395
+ .reshape(
396
+ (
397
+ batch_size,
398
+ num_frames,
399
+ -1,
400
+ )
401
+ + image.shape[2:]
402
+ )
403
+ .permute(0, 2, 1, 3, 4)
404
+ )
405
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
406
+ video = video.float()
407
+ return video
408
+
409
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
410
+ def enable_vae_slicing(self):
411
+ r"""
412
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
413
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
414
+ """
415
+ self.vae.enable_slicing()
416
+
417
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
418
+ def disable_vae_slicing(self):
419
+ r"""
420
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
421
+ computing decoding in one step.
422
+ """
423
+ self.vae.disable_slicing()
424
+
425
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
426
+ def enable_vae_tiling(self):
427
+ r"""
428
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
429
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
430
+ processing larger images.
431
+ """
432
+ self.vae.enable_tiling()
433
+
434
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
435
+ def disable_vae_tiling(self):
436
+ r"""
437
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
438
+ computing decoding in one step.
439
+ """
440
+ self.vae.disable_tiling()
441
+
442
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
443
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
444
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
445
+
446
+ The suffixes after the scaling factors represent the stages where they are being applied.
447
+
448
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
449
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
450
+
451
+ Args:
452
+ s1 (`float`):
453
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
454
+ mitigate "oversmoothing effect" in the enhanced denoising process.
455
+ s2 (`float`):
456
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
457
+ mitigate "oversmoothing effect" in the enhanced denoising process.
458
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
459
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
460
+ """
461
+ if not hasattr(self, "unet"):
462
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
463
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
464
+
465
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
466
+ def disable_freeu(self):
467
+ """Disables the FreeU mechanism if enabled."""
468
+ self.unet.disable_freeu()
469
+
470
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
471
+ def prepare_extra_step_kwargs(self, generator, eta):
472
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
473
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
474
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
475
+ # and should be between [0, 1]
476
+
477
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
478
+ extra_step_kwargs = {}
479
+ if accepts_eta:
480
+ extra_step_kwargs["eta"] = eta
481
+
482
+ # check if the scheduler accepts generator
483
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
484
+ if accepts_generator:
485
+ extra_step_kwargs["generator"] = generator
486
+ return extra_step_kwargs
487
+
488
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
489
+ def check_inputs(
490
+ self,
491
+ prompt,
492
+ height,
493
+ width,
494
+ num_frames,
495
+ callback_steps,
496
+ negative_prompt=None,
497
+ prompt_embeds=None,
498
+ negative_prompt_embeds=None,
499
+ callback_on_step_end_tensor_inputs=None,
500
+ image=None,
501
+ controlnet_conditioning_scale=1.0,
502
+ control_guidance_start=0.0,
503
+ control_guidance_end=1.0,
504
+ ):
505
+ if height % 8 != 0 or width % 8 != 0:
506
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
507
+
508
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
509
+ raise ValueError(
510
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
511
+ f" {type(callback_steps)}."
512
+ )
513
+ if callback_on_step_end_tensor_inputs is not None and not all(
514
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
515
+ ):
516
+ raise ValueError(
517
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
518
+ )
519
+
520
+ if prompt is not None and prompt_embeds is not None:
521
+ raise ValueError(
522
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
523
+ " only forward one of the two."
524
+ )
525
+ elif prompt is None and prompt_embeds is None:
526
+ raise ValueError(
527
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
528
+ )
529
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
530
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
531
+
532
+ if negative_prompt is not None and negative_prompt_embeds is not None:
533
+ raise ValueError(
534
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
535
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
536
+ )
537
+
538
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
539
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
540
+ raise ValueError(
541
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
542
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
543
+ f" {negative_prompt_embeds.shape}."
544
+ )
545
+
546
+ # `prompt` needs more sophisticated handling when there are multiple
547
+ # conditionings.
548
+ if isinstance(self.controlnet, MultiControlNetModel):
549
+ if isinstance(prompt, list):
550
+ logger.warning(
551
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
552
+ " prompts. The conditionings will be fixed across the prompts."
553
+ )
554
+
555
+ # Check `image`
556
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
557
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
558
+ )
559
+ if (
560
+ isinstance(self.controlnet, ControlNetModel)
561
+ or is_compiled
562
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
563
+ ):
564
+ if not isinstance(image, list):
565
+ raise TypeError(f"For single controlnet, `image` must be of type `list` but got {type(image)}")
566
+ if len(image) != num_frames:
567
+ raise ValueError(f"Expected image to have length {num_frames} but got {len(image)=}")
568
+ elif (
569
+ isinstance(self.controlnet, MultiControlNetModel)
570
+ or is_compiled
571
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
572
+ ):
573
+ if not isinstance(image, list) or not isinstance(image[0], list):
574
+ raise TypeError(f"For multiple controlnets: `image` must be type list of lists but got {type(image)=}")
575
+ if len(image[0]) != num_frames:
576
+ raise ValueError(f"Expected length of image sublist as {num_frames} but got {len(image[0])=}")
577
+ if any(len(img) != len(image[0]) for img in image):
578
+ raise ValueError("All conditioning frame batches for multicontrolnet must be same size")
579
+ else:
580
+ assert False
581
+
582
+ # Check `controlnet_conditioning_scale`
583
+ if (
584
+ isinstance(self.controlnet, ControlNetModel)
585
+ or is_compiled
586
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
587
+ ):
588
+ if not isinstance(controlnet_conditioning_scale, float):
589
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
590
+ elif (
591
+ isinstance(self.controlnet, MultiControlNetModel)
592
+ or is_compiled
593
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
594
+ ):
595
+ if isinstance(controlnet_conditioning_scale, list):
596
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
597
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
598
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
599
+ self.controlnet.nets
600
+ ):
601
+ raise ValueError(
602
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
603
+ " the same length as the number of controlnets"
604
+ )
605
+ else:
606
+ assert False
607
+
608
+ if not isinstance(control_guidance_start, (tuple, list)):
609
+ control_guidance_start = [control_guidance_start]
610
+
611
+ if not isinstance(control_guidance_end, (tuple, list)):
612
+ control_guidance_end = [control_guidance_end]
613
+
614
+ if len(control_guidance_start) != len(control_guidance_end):
615
+ raise ValueError(
616
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
617
+ )
618
+
619
+ if isinstance(self.controlnet, MultiControlNetModel):
620
+ if len(control_guidance_start) != len(self.controlnet.nets):
621
+ raise ValueError(
622
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
623
+ )
624
+
625
+ for start, end in zip(control_guidance_start, control_guidance_end):
626
+ if start >= end:
627
+ raise ValueError(
628
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
629
+ )
630
+ if start < 0.0:
631
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
632
+ if end > 1.0:
633
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
634
+
635
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
636
+ def check_image(self, image, prompt, prompt_embeds):
637
+ image_is_pil = isinstance(image, Image.Image)
638
+ image_is_tensor = isinstance(image, torch.Tensor)
639
+ image_is_np = isinstance(image, np.ndarray)
640
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image)
641
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
642
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
643
+
644
+ if (
645
+ not image_is_pil
646
+ and not image_is_tensor
647
+ and not image_is_np
648
+ and not image_is_pil_list
649
+ and not image_is_tensor_list
650
+ and not image_is_np_list
651
+ ):
652
+ raise TypeError(
653
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
654
+ )
655
+
656
+ if image_is_pil:
657
+ image_batch_size = 1
658
+ else:
659
+ image_batch_size = len(image)
660
+
661
+ if prompt is not None and isinstance(prompt, str):
662
+ prompt_batch_size = 1
663
+ elif prompt is not None and isinstance(prompt, list):
664
+ prompt_batch_size = len(prompt)
665
+ elif prompt_embeds is not None:
666
+ prompt_batch_size = prompt_embeds.shape[0]
667
+
668
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
669
+ raise ValueError(
670
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
671
+ )
672
+
673
+ # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
674
+ def prepare_latents(
675
+ self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
676
+ ):
677
+ shape = (
678
+ batch_size,
679
+ num_channels_latents,
680
+ num_frames,
681
+ height // self.vae_scale_factor,
682
+ width // self.vae_scale_factor,
683
+ )
684
+ if isinstance(generator, list) and len(generator) != batch_size:
685
+ raise ValueError(
686
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
687
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
688
+ )
689
+
690
+ if latents is None:
691
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
692
+ else:
693
+ latents = latents.to(device)
694
+
695
+ # scale the initial noise by the standard deviation required by the scheduler
696
+ latents = latents * self.scheduler.init_noise_sigma
697
+ return latents
698
+
699
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
700
+ def prepare_image(
701
+ self,
702
+ image,
703
+ width,
704
+ height,
705
+ batch_size,
706
+ num_images_per_prompt,
707
+ device,
708
+ dtype,
709
+ do_classifier_free_guidance=False,
710
+ guess_mode=False,
711
+ ):
712
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
713
+ image_batch_size = image.shape[0]
714
+
715
+ if image_batch_size == 1:
716
+ repeat_by = batch_size
717
+ else:
718
+ # image batch size is the same as prompt batch size
719
+ repeat_by = num_images_per_prompt
720
+
721
+ image = image.repeat_interleave(repeat_by, dim=0)
722
+
723
+ image = image.to(device=device, dtype=dtype)
724
+
725
+ if do_classifier_free_guidance and not guess_mode:
726
+ image = torch.cat([image] * 2)
727
+
728
+ return image
729
+
730
+ @property
731
+ def guidance_scale(self):
732
+ return self._guidance_scale
733
+
734
+ @property
735
+ def clip_skip(self):
736
+ return self._clip_skip
737
+
738
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
739
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
740
+ # corresponds to doing no classifier free guidance.
741
+ @property
742
+ def do_classifier_free_guidance(self):
743
+ return self._guidance_scale > 1
744
+
745
+ @property
746
+ def cross_attention_kwargs(self):
747
+ return self._cross_attention_kwargs
748
+
749
+ @property
750
+ def num_timesteps(self):
751
+ return self._num_timesteps
752
+
753
+ @torch.no_grad()
754
+ def __call__(
755
+ self,
756
+ prompt: Union[str, List[str]] = None,
757
+ num_frames: Optional[int] = 16,
758
+ height: Optional[int] = None,
759
+ width: Optional[int] = None,
760
+ num_inference_steps: int = 50,
761
+ guidance_scale: float = 7.5,
762
+ negative_prompt: Optional[Union[str, List[str]]] = None,
763
+ num_videos_per_prompt: Optional[int] = 1,
764
+ eta: float = 0.0,
765
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
766
+ latents: Optional[torch.FloatTensor] = None,
767
+ prompt_embeds: Optional[torch.FloatTensor] = None,
768
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
769
+ ip_adapter_image: Optional[PipelineImageInput] = None,
770
+ conditioning_frames: Optional[List[PipelineImageInput]] = None,
771
+ output_type: Optional[str] = "pil",
772
+ return_dict: bool = True,
773
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
774
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
775
+ guess_mode: bool = False,
776
+ control_guidance_start: Union[float, List[float]] = 0.0,
777
+ control_guidance_end: Union[float, List[float]] = 1.0,
778
+ clip_skip: Optional[int] = None,
779
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
780
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
781
+ **kwargs,
782
+ ):
783
+ r"""
784
+ The call function to the pipeline for generation.
785
+
786
+ Args:
787
+ prompt (`str` or `List[str]`, *optional*):
788
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
789
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
790
+ The height in pixels of the generated video.
791
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
792
+ The width in pixels of the generated video.
793
+ num_frames (`int`, *optional*, defaults to 16):
794
+ The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
795
+ amounts to 2 seconds of video.
796
+ num_inference_steps (`int`, *optional*, defaults to 50):
797
+ The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
798
+ expense of slower inference.
799
+ guidance_scale (`float`, *optional*, defaults to 7.5):
800
+ A higher guidance scale value encourages the model to generate images closely linked to the text
801
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
802
+ negative_prompt (`str` or `List[str]`, *optional*):
803
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
804
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
805
+ eta (`float`, *optional*, defaults to 0.0):
806
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
807
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
808
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
809
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
810
+ generation deterministic.
811
+ latents (`torch.FloatTensor`, *optional*):
812
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
813
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
814
+ tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
815
+ `(batch_size, num_channel, num_frames, height, width)`.
816
+ prompt_embeds (`torch.FloatTensor`, *optional*):
817
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
818
+ provided, text embeddings are generated from the `prompt` input argument.
819
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
820
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
821
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
822
+ ip_adapter_image (`PipelineImageInput`, *optional*):
823
+ Optional image input to work with IP Adapters.
824
+ conditioning_frames (`List[PipelineImageInput]`, *optional*):
825
+ The ControlNet input condition to provide guidance to the `unet` for generation. If multiple ControlNets
826
+ are specified, images must be passed as a list such that each element of the list can be correctly
827
+ batched for input to a single ControlNet.
828
+ output_type (`str`, *optional*, defaults to `"pil"`):
829
+ The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
830
+ `np.array`.
831
+ return_dict (`bool`, *optional*, defaults to `True`):
832
+ Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
833
+ of a plain tuple.
834
+ cross_attention_kwargs (`dict`, *optional*):
835
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
836
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
837
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
838
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
839
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
840
+ the corresponding scale as a list.
841
+ guess_mode (`bool`, *optional*, defaults to `False`):
842
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
843
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
844
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
845
+ The percentage of total steps at which the ControlNet starts applying.
846
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
847
+ The percentage of total steps at which the ControlNet stops applying.
848
+ clip_skip (`int`, *optional*):
849
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
850
+ the output of the pre-final layer will be used for computing the prompt embeddings.
851
+ callback_on_step_end (`Callable`, *optional*):
852
+ A function that is called at the end of each denoising step during inference. It is called
853
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
854
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
855
+ `callback_on_step_end_tensor_inputs`.
856
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
857
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
858
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
859
+ `._callback_tensor_inputs` attribute of your pipeline class.
860
+
861
+ Examples:
862
+
863
+ Returns:
864
+ [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:
865
+ If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is
866
+ returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
867
+ """
868
+
869
+ callback = kwargs.pop("callback", None)
870
+ callback_steps = kwargs.pop("callback_steps", None)
871
+
872
+ if callback is not None:
873
+ deprecate(
874
+ "callback",
875
+ "1.0.0",
876
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
877
+ )
878
+ if callback_steps is not None:
879
+ deprecate(
880
+ "callback_steps",
881
+ "1.0.0",
882
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
883
+ )
884
+
885
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
886
+
887
+ # align format for control guidance
888
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
889
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
890
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
891
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
892
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
893
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
894
+ control_guidance_start, control_guidance_end = (
895
+ mult * [control_guidance_start],
896
+ mult * [control_guidance_end],
897
+ )
898
+
899
+ # 0. Default height and width to unet
900
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
901
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
902
+
903
+ num_videos_per_prompt = 1
904
+
905
+ # 1. Check inputs. Raise error if not correct
906
+ self.check_inputs(
907
+ prompt=prompt,
908
+ height=height,
909
+ width=width,
910
+ num_frames=num_frames,
911
+ callback_steps=callback_steps,
912
+ negative_prompt=negative_prompt,
913
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
914
+ prompt_embeds=prompt_embeds,
915
+ negative_prompt_embeds=negative_prompt_embeds,
916
+ image=conditioning_frames,
917
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
918
+ control_guidance_start=control_guidance_start,
919
+ control_guidance_end=control_guidance_end,
920
+ )
921
+
922
+ self._guidance_scale = guidance_scale
923
+ self._clip_skip = clip_skip
924
+ self._cross_attention_kwargs = cross_attention_kwargs
925
+
926
+ # 2. Define call parameters
927
+ if prompt is not None and isinstance(prompt, str):
928
+ batch_size = 1
929
+ elif prompt is not None and isinstance(prompt, list):
930
+ batch_size = len(prompt)
931
+ else:
932
+ batch_size = prompt_embeds.shape[0]
933
+
934
+ device = self._execution_device
935
+
936
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
937
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
938
+
939
+ global_pool_conditions = (
940
+ controlnet.config.global_pool_conditions
941
+ if isinstance(controlnet, ControlNetModel)
942
+ else controlnet.nets[0].config.global_pool_conditions
943
+ )
944
+ guess_mode = guess_mode or global_pool_conditions
945
+
946
+ # 3. Encode input prompt
947
+ text_encoder_lora_scale = (
948
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
949
+ )
950
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
951
+ prompt,
952
+ device,
953
+ num_videos_per_prompt,
954
+ self.do_classifier_free_guidance,
955
+ negative_prompt,
956
+ prompt_embeds=prompt_embeds,
957
+ negative_prompt_embeds=negative_prompt_embeds,
958
+ lora_scale=text_encoder_lora_scale,
959
+ clip_skip=self.clip_skip,
960
+ )
961
+ # For classifier free guidance, we need to do two forward passes.
962
+ # Here we concatenate the unconditional and text embeddings into a single batch
963
+ # to avoid doing two forward passes
964
+ if self.do_classifier_free_guidance:
965
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
966
+
967
+ if ip_adapter_image is not None:
968
+ image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_videos_per_prompt)
969
+ if self.do_classifier_free_guidance:
970
+ image_embeds = torch.cat([negative_image_embeds, image_embeds])
971
+
972
+ if isinstance(controlnet, ControlNetModel):
973
+ conditioning_frames = self.prepare_image(
974
+ image=conditioning_frames,
975
+ width=width,
976
+ height=height,
977
+ batch_size=batch_size * num_videos_per_prompt * num_frames,
978
+ num_images_per_prompt=num_videos_per_prompt,
979
+ device=device,
980
+ dtype=controlnet.dtype,
981
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
982
+ guess_mode=guess_mode,
983
+ )
984
+ elif isinstance(controlnet, MultiControlNetModel):
985
+ cond_prepared_frames = []
986
+ for frame_ in conditioning_frames:
987
+ prepared_frame = self.prepare_image(
988
+ image=frame_,
989
+ width=width,
990
+ height=height,
991
+ batch_size=batch_size * num_videos_per_prompt * num_frames,
992
+ num_images_per_prompt=num_videos_per_prompt,
993
+ device=device,
994
+ dtype=controlnet.dtype,
995
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
996
+ guess_mode=guess_mode,
997
+ )
998
+ cond_prepared_frames.append(prepared_frame)
999
+ conditioning_frames = cond_prepared_frames
1000
+ else:
1001
+ assert False
1002
+
1003
+ # 4. Prepare timesteps
1004
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1005
+ timesteps = self.scheduler.timesteps
1006
+ self._num_timesteps = len(timesteps)
1007
+
1008
+ # 5. Prepare latent variables
1009
+ num_channels_latents = self.unet.config.in_channels
1010
+ latents = self.prepare_latents(
1011
+ batch_size * num_videos_per_prompt,
1012
+ num_channels_latents,
1013
+ num_frames,
1014
+ height,
1015
+ width,
1016
+ prompt_embeds.dtype,
1017
+ device,
1018
+ generator,
1019
+ latents,
1020
+ )
1021
+
1022
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1023
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1024
+
1025
+ # 7. Add image embeds for IP-Adapter
1026
+ added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
1027
+
1028
+ # 7.1 Create tensor stating which controlnets to keep
1029
+ controlnet_keep = []
1030
+ for i in range(len(timesteps)):
1031
+ keeps = [
1032
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1033
+ for s, e in zip(control_guidance_start, control_guidance_end)
1034
+ ]
1035
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1036
+
1037
+ # Denoising loop
1038
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1039
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1040
+ for i, t in enumerate(timesteps):
1041
+ # expand the latents if we are doing classifier free guidance
1042
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1043
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1044
+
1045
+ if guess_mode and self.do_classifier_free_guidance:
1046
+ # Infer ControlNet only for the conditional batch.
1047
+ control_model_input = latents
1048
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1049
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1050
+ else:
1051
+ control_model_input = latent_model_input
1052
+ controlnet_prompt_embeds = prompt_embeds
1053
+ controlnet_prompt_embeds = controlnet_prompt_embeds.repeat_interleave(num_frames, dim=0)
1054
+
1055
+ if isinstance(controlnet_keep[i], list):
1056
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1057
+ else:
1058
+ controlnet_cond_scale = controlnet_conditioning_scale
1059
+ if isinstance(controlnet_cond_scale, list):
1060
+ controlnet_cond_scale = controlnet_cond_scale[0]
1061
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1062
+
1063
+ control_model_input = torch.transpose(control_model_input, 1, 2)
1064
+ control_model_input = control_model_input.reshape(
1065
+ (-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])
1066
+ )
1067
+
1068
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1069
+ control_model_input,
1070
+ t,
1071
+ encoder_hidden_states=controlnet_prompt_embeds,
1072
+ controlnet_cond=conditioning_frames,
1073
+ conditioning_scale=cond_scale,
1074
+ guess_mode=guess_mode,
1075
+ return_dict=False,
1076
+ )
1077
+
1078
+ # predict the noise residual
1079
+ noise_pred = self.unet(
1080
+ latent_model_input,
1081
+ t,
1082
+ encoder_hidden_states=prompt_embeds,
1083
+ cross_attention_kwargs=self.cross_attention_kwargs,
1084
+ added_cond_kwargs=added_cond_kwargs,
1085
+ down_block_additional_residuals=down_block_res_samples,
1086
+ mid_block_additional_residual=mid_block_res_sample,
1087
+ ).sample
1088
+
1089
+ # perform guidance
1090
+ if self.do_classifier_free_guidance:
1091
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1092
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1093
+
1094
+ # compute the previous noisy sample x_t -> x_t-1
1095
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1096
+
1097
+ if callback_on_step_end is not None:
1098
+ callback_kwargs = {}
1099
+ for k in callback_on_step_end_tensor_inputs:
1100
+ callback_kwargs[k] = locals()[k]
1101
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1102
+
1103
+ latents = callback_outputs.pop("latents", latents)
1104
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1105
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1106
+
1107
+ # call the callback, if provided
1108
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1109
+ progress_bar.update()
1110
+ if callback is not None and i % callback_steps == 0:
1111
+ callback(i, t, latents)
1112
+
1113
+ if output_type == "latent":
1114
+ return AnimateDiffControlNetPipelineOutput(frames=latents)
1115
+
1116
+ # Post-processing
1117
+ video_tensor = self.decode_latents(latents)
1118
+
1119
+ if output_type == "pt":
1120
+ video = video_tensor
1121
+ else:
1122
+ video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
1123
+
1124
+ # Offload all models
1125
+ self.maybe_free_model_hooks()
1126
+
1127
+ if not return_dict:
1128
+ return (video,)
1129
+
1130
+ return AnimateDiffControlNetPipelineOutput(frames=video)
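The pipeline above is a community pipeline, so it is loaded via `custom_pipeline`. Below is a minimal usage sketch; the base model, ControlNet checkpoint, and the placeholder `conditioning_frames` list (one control image per generated frame, as the preparation loop above expects) are illustrative assumptions, not part of this file.

```py
import torch
from diffusers import ControlNetModel, DiffusionPipeline, MotionAdapter
from diffusers.utils import export_to_gif, load_image

# Assumed checkpoints -- swap in any compatible SD 1.5 base, motion adapter, and ControlNet.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    motion_adapter=adapter,
    controlnet=controlnet,
    custom_pipeline="pipeline_animatediff_controlnet",
    torch_dtype=torch.float16,
).to("cuda")

# Placeholder control images: the pipeline expects one conditioning frame per generated frame.
conditioning_frames = [load_image("pose.png")] * 16

output = pipe(
    prompt="a robot dancing, best quality",
    conditioning_frames=conditioning_frames,
    num_frames=16,
    num_inference_steps=25,
)
export_to_gif(output.frames[0], "animation.gif")
```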
v0.26.3/pipeline_animatediff_img2video.py ADDED
@@ -0,0 +1,989 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from dataclasses import dataclass
17
+ from types import FunctionType
18
+ from typing import Any, Callable, Dict, List, Optional, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
23
+
24
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
25
+ from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
+ from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel, UNetMotionModel
27
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
28
+ from diffusers.models.unet_motion_model import MotionAdapter
29
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
30
+ from diffusers.schedulers import (
31
+ DDIMScheduler,
32
+ DPMSolverMultistepScheduler,
33
+ EulerAncestralDiscreteScheduler,
34
+ EulerDiscreteScheduler,
35
+ LMSDiscreteScheduler,
36
+ PNDMScheduler,
37
+ )
38
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers
39
+ from diffusers.utils.torch_utils import randn_tensor
40
+
41
+
42
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
43
+
44
+ EXAMPLE_DOC_STRING = """
45
+ Examples:
46
+ ```py
47
+ >>> import torch
48
+ >>> from diffusers import MotionAdapter, DiffusionPipeline, DDIMScheduler
49
+ >>> from diffusers.utils import export_to_gif, load_image
50
+
51
+ >>> adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
52
+ >>> pipe = DiffusionPipeline.from_pretrained("SG161222/Realistic_Vision_V5.1_noVAE", motion_adapter=adapter, custom_pipeline="pipeline_animatediff_img2video").to("cuda")
53
+ >>> pipe.scheduler = DDIMScheduler(beta_schedule="linear", steps_offset=1, clip_sample=False, timestep_spacing="linspace")
54
+
55
+ >>> image = load_image("snail.png")
56
+ >>> output = pipe(image=image, prompt="A snail moving on the ground", strength=0.8, latent_interpolation_method="slerp")
57
+ >>> frames = output.frames[0]
58
+ >>> export_to_gif(frames, "animation.gif")
59
+ ```
60
+ """
61
+
62
+
63
+ def lerp(
64
+ v0: torch.Tensor,
65
+ v1: torch.Tensor,
66
+ t: Union[float, torch.Tensor],
67
+ ) -> torch.Tensor:
68
+ r"""
69
+ Linear Interpolation between two tensors.
70
+
71
+ Args:
72
+ v0 (`torch.Tensor`): First tensor.
73
+ v1 (`torch.Tensor`): Second tensor.
74
+ t: (`float` or `torch.Tensor`): Interpolation factor.
75
+ """
76
+ t_is_float = False
77
+ input_device = v0.device
78
+ v0 = v0.cpu().numpy()
79
+ v1 = v1.cpu().numpy()
80
+
81
+ if isinstance(t, torch.Tensor):
82
+ t = t.cpu().numpy()
83
+ else:
84
+ t_is_float = True
85
+ t = np.array([t], dtype=v0.dtype)
86
+
87
+ t = t[..., None]
88
+ v0 = v0[None, ...]
89
+ v1 = v1[None, ...]
90
+ v2 = (1 - t) * v0 + t * v1
91
+
92
+ if t_is_float and v0.ndim > 1:
93
+ assert v2.shape[0] == 1
94
+ v2 = np.squeeze(v2, axis=0)
95
+
96
+ v2 = torch.from_numpy(v2).to(input_device)
97
+ return v2
98
+
99
+
100
+ def slerp(
101
+ v0: torch.Tensor,
102
+ v1: torch.Tensor,
103
+ t: Union[float, torch.Tensor],
104
+ DOT_THRESHOLD: float = 0.9995,
105
+ ) -> torch.Tensor:
106
+ r"""
107
+ Spherical Linear Interpolation between two tensors.
108
+
109
+ Args:
110
+ v0 (`torch.Tensor`): First tensor.
111
+ v1 (`torch.Tensor`): Second tensor.
112
+ t: (`float` or `torch.Tensor`): Interpolation factor.
113
+ DOT_THRESHOLD (`float`):
114
+ Dot product threshold above which plain linear interpolation is used instead,
115
+ because the input tensors are close to parallel.
116
+ """
117
+ t_is_float = False
118
+ input_device = v0.device
119
+ v0 = v0.cpu().numpy()
120
+ v1 = v1.cpu().numpy()
121
+
122
+ if isinstance(t, torch.Tensor):
123
+ t = t.cpu().numpy()
124
+ else:
125
+ t_is_float = True
126
+ t = np.array([t], dtype=v0.dtype)
127
+
128
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
129
+
130
+ if np.abs(dot) > DOT_THRESHOLD:
131
+ # v0 and v1 are close to parallel, so use linear interpolation instead
132
+ v2 = lerp(v0, v1, t)
133
+ else:
134
+ theta_0 = np.arccos(dot)
135
+ sin_theta_0 = np.sin(theta_0)
136
+ theta_t = theta_0 * t
137
+ sin_theta_t = np.sin(theta_t)
138
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
139
+ s1 = sin_theta_t / sin_theta_0
140
+ s0 = s0[..., None]
141
+ s1 = s1[..., None]
142
+ v0 = v0[None, ...]
143
+ v1 = v1[None, ...]
144
+ v2 = s0 * v0 + s1 * v1
145
+
146
+ if t_is_float and v0.ndim > 1:
147
+ assert v2.shape[0] == 1
148
+ v2 = np.squeeze(v2, axis=0)
149
+
150
+ v2 = torch.from_numpy(v2).to(input_device)
151
+ return v2
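A quick illustrative check of the helpers above (a sketch only; `slerp` is the function just defined, and the per-frame weighting mirrors the `prepare_latents` method further down, which uses `index / num_frames * (1 - strength)`):

```py
import torch

noise = torch.randn(1, 4, 64, 64)         # random latent for one frame
image_latent = torch.randn(1, 4, 64, 64)  # stand-in for a VAE-encoded input image

num_frames, strength = 16, 0.8
# Frame 0 stays pure noise (t = 0); later frames drift toward the image latent as t grows.
frames = [slerp(noise, image_latent, i / num_frames * (1 - strength)) for i in range(num_frames)]
print(frames[0].shape)  # torch.Size([1, 4, 64, 64])
```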
152
+
153
+
154
+ def tensor2vid(video: torch.Tensor, processor, output_type="np"):
155
+ # Based on:
156
+ # https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78
157
+
158
+ batch_size, channels, num_frames, height, width = video.shape
159
+ outputs = []
160
+ for batch_idx in range(batch_size):
161
+ batch_vid = video[batch_idx].permute(1, 0, 2, 3)
162
+ batch_output = processor.postprocess(batch_vid, output_type)
163
+
164
+ outputs.append(batch_output)
165
+
166
+ return outputs
167
+
168
+
169
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
170
+ def retrieve_latents(
171
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
172
+ ):
173
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
174
+ return encoder_output.latent_dist.sample(generator)
175
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
176
+ return encoder_output.latent_dist.mode()
177
+ elif hasattr(encoder_output, "latents"):
178
+ return encoder_output.latents
179
+ else:
180
+ raise AttributeError("Could not access latents of provided encoder_output")
181
+
182
+
183
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
184
+ def retrieve_timesteps(
185
+ scheduler,
186
+ num_inference_steps: Optional[int] = None,
187
+ device: Optional[Union[str, torch.device]] = None,
188
+ timesteps: Optional[List[int]] = None,
189
+ **kwargs,
190
+ ):
191
+ """
192
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
193
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
194
+
195
+ Args:
196
+ scheduler (`SchedulerMixin`):
197
+ The scheduler to get timesteps from.
198
+ num_inference_steps (`int`):
199
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
200
+ `timesteps` must be `None`.
201
+ device (`str` or `torch.device`, *optional*):
202
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
203
+ timesteps (`List[int]`, *optional*):
204
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
205
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
206
+ must be `None`.
207
+
208
+ Returns:
209
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
210
+ second element is the number of inference steps.
211
+ """
212
+ if timesteps is not None:
213
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
214
+ if not accepts_timesteps:
215
+ raise ValueError(
216
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
217
+ f" timestep schedules. Please check whether you are using the correct scheduler."
218
+ )
219
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
220
+ timesteps = scheduler.timesteps
221
+ num_inference_steps = len(timesteps)
222
+ else:
223
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
224
+ timesteps = scheduler.timesteps
225
+ return timesteps, num_inference_steps
226
+
227
+
228
+ @dataclass
229
+ class AnimateDiffImgToVideoPipelineOutput(BaseOutput):
230
+ frames: Union[torch.Tensor, np.ndarray]
231
+
232
+
233
+ class AnimateDiffImgToVideoPipeline(DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin):
234
+ r"""
235
+ Pipeline for image-to-video generation using AnimateDiff.
236
+
237
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
238
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
239
+
240
+ The pipeline also inherits the following loading methods:
241
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
242
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
243
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
244
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
245
+
246
+ Args:
247
+ vae ([`AutoencoderKL`]):
248
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
249
+ text_encoder ([`CLIPTextModel`]):
250
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
251
+ tokenizer (`CLIPTokenizer`):
252
+ A [`~transformers.CLIPTokenizer`] to tokenize text.
253
+ unet ([`UNet2DConditionModel`]):
254
+ A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
255
+ motion_adapter ([`MotionAdapter`]):
256
+ A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
257
+ scheduler ([`SchedulerMixin`]):
258
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
259
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
260
+ """
261
+
262
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
263
+ _optional_components = ["feature_extractor", "image_encoder"]
264
+
265
+ def __init__(
266
+ self,
267
+ vae: AutoencoderKL,
268
+ text_encoder: CLIPTextModel,
269
+ tokenizer: CLIPTokenizer,
270
+ unet: UNet2DConditionModel,
271
+ motion_adapter: MotionAdapter,
272
+ scheduler: Union[
273
+ DDIMScheduler,
274
+ PNDMScheduler,
275
+ LMSDiscreteScheduler,
276
+ EulerDiscreteScheduler,
277
+ EulerAncestralDiscreteScheduler,
278
+ DPMSolverMultistepScheduler,
279
+ ],
280
+ feature_extractor: CLIPImageProcessor = None,
281
+ image_encoder: CLIPVisionModelWithProjection = None,
282
+ ):
283
+ super().__init__()
284
+ unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
285
+
286
+ self.register_modules(
287
+ vae=vae,
288
+ text_encoder=text_encoder,
289
+ tokenizer=tokenizer,
290
+ unet=unet,
291
+ motion_adapter=motion_adapter,
292
+ scheduler=scheduler,
293
+ feature_extractor=feature_extractor,
294
+ image_encoder=image_encoder,
295
+ )
296
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
297
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
298
+
299
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
300
+ def encode_prompt(
301
+ self,
302
+ prompt,
303
+ device,
304
+ num_images_per_prompt,
305
+ do_classifier_free_guidance,
306
+ negative_prompt=None,
307
+ prompt_embeds: Optional[torch.FloatTensor] = None,
308
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
309
+ lora_scale: Optional[float] = None,
310
+ clip_skip: Optional[int] = None,
311
+ ):
312
+ r"""
313
+ Encodes the prompt into text encoder hidden states.
314
+
315
+ Args:
316
+ prompt (`str` or `List[str]`, *optional*):
317
+ prompt to be encoded
318
+ device: (`torch.device`):
319
+ torch device
320
+ num_images_per_prompt (`int`):
321
+ number of images that should be generated per prompt
322
+ do_classifier_free_guidance (`bool`):
323
+ whether to use classifier free guidance or not
324
+ negative_prompt (`str` or `List[str]`, *optional*):
325
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
326
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
327
+ less than `1`).
328
+ prompt_embeds (`torch.FloatTensor`, *optional*):
329
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
330
+ provided, text embeddings will be generated from `prompt` input argument.
331
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
332
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
333
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
334
+ argument.
335
+ lora_scale (`float`, *optional*):
336
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
337
+ clip_skip (`int`, *optional*):
338
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
339
+ the output of the pre-final layer will be used for computing the prompt embeddings.
340
+ """
341
+ # set lora scale so that monkey patched LoRA
342
+ # function of text encoder can correctly access it
343
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
344
+ self._lora_scale = lora_scale
345
+
346
+ # dynamically adjust the LoRA scale
347
+ if not USE_PEFT_BACKEND:
348
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
349
+ else:
350
+ scale_lora_layers(self.text_encoder, lora_scale)
351
+
352
+ if prompt is not None and isinstance(prompt, str):
353
+ batch_size = 1
354
+ elif prompt is not None and isinstance(prompt, list):
355
+ batch_size = len(prompt)
356
+ else:
357
+ batch_size = prompt_embeds.shape[0]
358
+
359
+ if prompt_embeds is None:
360
+ # textual inversion: process multi-vector tokens if necessary
361
+ if isinstance(self, TextualInversionLoaderMixin):
362
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
363
+
364
+ text_inputs = self.tokenizer(
365
+ prompt,
366
+ padding="max_length",
367
+ max_length=self.tokenizer.model_max_length,
368
+ truncation=True,
369
+ return_tensors="pt",
370
+ )
371
+ text_input_ids = text_inputs.input_ids
372
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
373
+
374
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
375
+ text_input_ids, untruncated_ids
376
+ ):
377
+ removed_text = self.tokenizer.batch_decode(
378
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
379
+ )
380
+ logger.warning(
381
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
382
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
383
+ )
384
+
385
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
386
+ attention_mask = text_inputs.attention_mask.to(device)
387
+ else:
388
+ attention_mask = None
389
+
390
+ if clip_skip is None:
391
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
392
+ prompt_embeds = prompt_embeds[0]
393
+ else:
394
+ prompt_embeds = self.text_encoder(
395
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
396
+ )
397
+ # Access the `hidden_states` first, that contains a tuple of
398
+ # all the hidden states from the encoder layers. Then index into
399
+ # the tuple to access the hidden states from the desired layer.
400
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
401
+ # We also need to apply the final LayerNorm here to not mess with the
402
+ # representations. The `last_hidden_states` that we typically use for
403
+ # obtaining the final prompt representations passes through the LayerNorm
404
+ # layer.
405
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
406
+
407
+ if self.text_encoder is not None:
408
+ prompt_embeds_dtype = self.text_encoder.dtype
409
+ elif self.unet is not None:
410
+ prompt_embeds_dtype = self.unet.dtype
411
+ else:
412
+ prompt_embeds_dtype = prompt_embeds.dtype
413
+
414
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
415
+
416
+ bs_embed, seq_len, _ = prompt_embeds.shape
417
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
418
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
419
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
420
+
421
+ # get unconditional embeddings for classifier free guidance
422
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
423
+ uncond_tokens: List[str]
424
+ if negative_prompt is None:
425
+ uncond_tokens = [""] * batch_size
426
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
427
+ raise TypeError(
428
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
429
+ f" {type(prompt)}."
430
+ )
431
+ elif isinstance(negative_prompt, str):
432
+ uncond_tokens = [negative_prompt]
433
+ elif batch_size != len(negative_prompt):
434
+ raise ValueError(
435
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
436
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
437
+ " the batch size of `prompt`."
438
+ )
439
+ else:
440
+ uncond_tokens = negative_prompt
441
+
442
+ # textual inversion: process multi-vector tokens if necessary
443
+ if isinstance(self, TextualInversionLoaderMixin):
444
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
445
+
446
+ max_length = prompt_embeds.shape[1]
447
+ uncond_input = self.tokenizer(
448
+ uncond_tokens,
449
+ padding="max_length",
450
+ max_length=max_length,
451
+ truncation=True,
452
+ return_tensors="pt",
453
+ )
454
+
455
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
456
+ attention_mask = uncond_input.attention_mask.to(device)
457
+ else:
458
+ attention_mask = None
459
+
460
+ negative_prompt_embeds = self.text_encoder(
461
+ uncond_input.input_ids.to(device),
462
+ attention_mask=attention_mask,
463
+ )
464
+ negative_prompt_embeds = negative_prompt_embeds[0]
465
+
466
+ if do_classifier_free_guidance:
467
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
468
+ seq_len = negative_prompt_embeds.shape[1]
469
+
470
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
471
+
472
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
473
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
474
+
475
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
476
+ # Retrieve the original scale by scaling back the LoRA layers
477
+ unscale_lora_layers(self.text_encoder, lora_scale)
478
+
479
+ return prompt_embeds, negative_prompt_embeds
480
+
481
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
482
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
483
+ dtype = next(self.image_encoder.parameters()).dtype
484
+
485
+ if not isinstance(image, torch.Tensor):
486
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
487
+
488
+ image = image.to(device=device, dtype=dtype)
489
+ if output_hidden_states:
490
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
491
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
492
+ uncond_image_enc_hidden_states = self.image_encoder(
493
+ torch.zeros_like(image), output_hidden_states=True
494
+ ).hidden_states[-2]
495
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
496
+ num_images_per_prompt, dim=0
497
+ )
498
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
499
+ else:
500
+ image_embeds = self.image_encoder(image).image_embeds
501
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
502
+ uncond_image_embeds = torch.zeros_like(image_embeds)
503
+
504
+ return image_embeds, uncond_image_embeds
505
+
506
+ # Copied from diffusers.pipelines.text_to_video_synthesis/pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
507
+ def decode_latents(self, latents):
508
+ latents = 1 / self.vae.config.scaling_factor * latents
509
+
510
+ batch_size, channels, num_frames, height, width = latents.shape
511
+ latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
512
+
513
+ image = self.vae.decode(latents).sample
514
+ video = (
515
+ image[None, :]
516
+ .reshape(
517
+ (
518
+ batch_size,
519
+ num_frames,
520
+ -1,
521
+ )
522
+ + image.shape[2:]
523
+ )
524
+ .permute(0, 2, 1, 3, 4)
525
+ )
526
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
527
+ video = video.float()
528
+ return video
529
+
530
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
531
+ def enable_vae_slicing(self):
532
+ r"""
533
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
534
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
535
+ """
536
+ self.vae.enable_slicing()
537
+
538
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
539
+ def disable_vae_slicing(self):
540
+ r"""
541
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
542
+ computing decoding in one step.
543
+ """
544
+ self.vae.disable_slicing()
545
+
546
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
547
+ def enable_vae_tiling(self):
548
+ r"""
549
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
550
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
551
+ processing larger images.
552
+ """
553
+ self.vae.enable_tiling()
554
+
555
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
556
+ def disable_vae_tiling(self):
557
+ r"""
558
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
559
+ computing decoding in one step.
560
+ """
561
+ self.vae.disable_tiling()
562
+
563
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
564
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
565
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
566
+
567
+ The suffixes after the scaling factors represent the stages where they are being applied.
568
+
569
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
570
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
571
+
572
+ Args:
573
+ s1 (`float`):
574
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
575
+ mitigate "oversmoothing effect" in the enhanced denoising process.
576
+ s2 (`float`):
577
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
578
+ mitigate "oversmoothing effect" in the enhanced denoising process.
579
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
580
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
581
+ """
582
+ if not hasattr(self, "unet"):
583
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
584
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
585
+
586
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
587
+ def disable_freeu(self):
588
+ """Disables the FreeU mechanism if enabled."""
589
+ self.unet.disable_freeu()
590
+
591
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
592
+ def prepare_extra_step_kwargs(self, generator, eta):
593
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
594
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
595
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
596
+ # and should be between [0, 1]
597
+
598
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
599
+ extra_step_kwargs = {}
600
+ if accepts_eta:
601
+ extra_step_kwargs["eta"] = eta
602
+
603
+ # check if the scheduler accepts generator
604
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
605
+ if accepts_generator:
606
+ extra_step_kwargs["generator"] = generator
607
+ return extra_step_kwargs
608
+
609
+ def check_inputs(
610
+ self,
611
+ prompt,
612
+ height,
613
+ width,
614
+ callback_steps,
615
+ negative_prompt=None,
616
+ prompt_embeds=None,
617
+ negative_prompt_embeds=None,
618
+ callback_on_step_end_tensor_inputs=None,
619
+ latent_interpolation_method=None,
620
+ ):
621
+ if height % 8 != 0 or width % 8 != 0:
622
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
623
+
624
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
625
+ raise ValueError(
626
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
627
+ f" {type(callback_steps)}."
628
+ )
629
+ if callback_on_step_end_tensor_inputs is not None and not all(
630
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
631
+ ):
632
+ raise ValueError(
633
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
634
+ )
635
+
636
+ if prompt is not None and prompt_embeds is not None:
637
+ raise ValueError(
638
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
639
+ " only forward one of the two."
640
+ )
641
+ elif prompt is None and prompt_embeds is None:
642
+ raise ValueError(
643
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
644
+ )
645
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
646
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
647
+
648
+ if negative_prompt is not None and negative_prompt_embeds is not None:
649
+ raise ValueError(
650
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
651
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
652
+ )
653
+
654
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
655
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
656
+ raise ValueError(
657
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
658
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
659
+ f" {negative_prompt_embeds.shape}."
660
+ )
661
+
662
+ if latent_interpolation_method is not None:
663
+ if latent_interpolation_method not in ["lerp", "slerp"] and not isinstance(
664
+ latent_interpolation_method, FunctionType
665
+ ):
666
+ raise ValueError(
667
+ "`latent_interpolation_method` must be one of `lerp`, `slerp` or a Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]"
668
+ )
669
+
670
+ def prepare_latents(
671
+ self,
672
+ image,
673
+ strength,
674
+ batch_size,
675
+ num_channels_latents,
676
+ num_frames,
677
+ height,
678
+ width,
679
+ dtype,
680
+ device,
681
+ generator,
682
+ latents=None,
683
+ latent_interpolation_method="slerp",
684
+ ):
685
+ shape = (
686
+ batch_size,
687
+ num_channels_latents,
688
+ num_frames,
689
+ height // self.vae_scale_factor,
690
+ width // self.vae_scale_factor,
691
+ )
692
+
693
+ if latents is None:
694
+ image = image.to(device=device, dtype=dtype)
695
+
696
+ if image.shape[1] == 4:
697
+ latents = image
698
+ else:
699
+ # make sure the VAE is in float32 mode, as it overflows in float16
700
+ if self.vae.config.force_upcast:
701
+ image = image.float()
702
+ self.vae.to(dtype=torch.float32)
703
+
704
+ if isinstance(generator, list):
705
+ if len(generator) != batch_size:
706
+ raise ValueError(
707
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
708
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
709
+ )
710
+
711
+ init_latents = [
712
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
713
+ for i in range(batch_size)
714
+ ]
715
+ init_latents = torch.cat(init_latents, dim=0)
716
+ else:
717
+ init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
718
+
719
+ if self.vae.config.force_upcast:
720
+ self.vae.to(dtype)
721
+
722
+ init_latents = init_latents.to(dtype)
723
+ init_latents = self.vae.config.scaling_factor * init_latents
724
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
725
+ latents = latents * self.scheduler.init_noise_sigma
726
+
727
+ if latent_interpolation_method == "lerp":
728
+
729
+ def latent_cls(v0, v1, index):
730
+ return lerp(v0, v1, index / num_frames * (1 - strength))
731
+ elif latent_interpolation_method == "slerp":
732
+
733
+ def latent_cls(v0, v1, index):
734
+ return slerp(v0, v1, index / num_frames * (1 - strength))
735
+ else:
736
+ latent_cls = latent_interpolation_method
737
+
738
+ for i in range(num_frames):
739
+ latents[:, :, i, :, :] = latent_cls(latents[:, :, i, :, :], init_latents, i)
740
+ else:
741
+ if shape != latents.shape:
742
+ # [B, C, F, H, W]
743
+ raise ValueError(f"`latents` expected to have {shape=}, but found {latents.shape=}")
744
+ latents = latents.to(device, dtype=dtype)
745
+
746
+ return latents
747
+
748
+ @torch.no_grad()
749
+ def __call__(
750
+ self,
751
+ image: PipelineImageInput,
752
+ prompt: Optional[Union[str, List[str]]] = None,
753
+ height: Optional[int] = None,
754
+ width: Optional[int] = None,
755
+ num_frames: int = 16,
756
+ num_inference_steps: int = 50,
757
+ timesteps: Optional[List[int]] = None,
758
+ guidance_scale: float = 7.5,
759
+ strength: float = 0.8,
760
+ negative_prompt: Optional[Union[str, List[str]]] = None,
761
+ num_videos_per_prompt: Optional[int] = 1,
762
+ eta: float = 0.0,
763
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
764
+ latents: Optional[torch.FloatTensor] = None,
765
+ prompt_embeds: Optional[torch.FloatTensor] = None,
766
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
767
+ ip_adapter_image: Optional[PipelineImageInput] = None,
768
+ output_type: Optional[str] = "pil",
769
+ return_dict: bool = True,
770
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
771
+ callback_steps: Optional[int] = 1,
772
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
773
+ clip_skip: Optional[int] = None,
774
+ latent_interpolation_method: Union[str, Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]] = "slerp",
775
+ ):
776
+ r"""
777
+ The call function to the pipeline for generation.
778
+
779
+ Args:
780
+ image (`PipelineImageInput`):
781
+ The input image to condition the generation on.
782
+ prompt (`str` or `List[str]`, *optional*):
783
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
784
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
785
+ The height in pixels of the generated video.
786
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
787
+ The width in pixels of the generated video.
788
+ num_frames (`int`, *optional*, defaults to 16):
789
+ The number of video frames that are generated. Defaults to 16 frames, which at 8 frames per second
790
+ amounts to 2 seconds of video.
791
+ num_inference_steps (`int`, *optional*, defaults to 50):
792
+ The number of denoising steps. More denoising steps usually lead to higher quality videos at the
793
+ expense of slower inference.
794
+ strength (`float`, *optional*, defaults to 0.8):
795
+ Higher strength leads to larger differences between the original image and the generated video.
796
+ guidance_scale (`float`, *optional*, defaults to 7.5):
797
+ A higher guidance scale value encourages the model to generate images closely linked to the text
798
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
799
+ negative_prompt (`str` or `List[str]`, *optional*):
800
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
801
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
802
+ eta (`float`, *optional*, defaults to 0.0):
803
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
804
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
805
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
806
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
807
+ generation deterministic.
808
+ latents (`torch.FloatTensor`, *optional*):
809
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
810
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
811
+ tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
812
+ `(batch_size, num_channel, num_frames, height, width)`.
813
+ prompt_embeds (`torch.FloatTensor`, *optional*):
814
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
815
+ provided, text embeddings are generated from the `prompt` input argument.
816
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
817
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
818
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
819
+ ip_adapter_image: (`PipelineImageInput`, *optional*):
820
+ Optional image input to work with IP Adapters.
821
+ output_type (`str`, *optional*, defaults to `"pil"`):
822
+ The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
823
+ `np.array`.
824
+ return_dict (`bool`, *optional*, defaults to `True`):
825
+ Whether or not to return a [`AnimateDiffImgToVideoPipelineOutput`] instead
826
+ of a plain tuple.
827
+ callback (`Callable`, *optional*):
828
+ A function that calls every `callback_steps` steps during inference. The function is called with the
829
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
830
+ callback_steps (`int`, *optional*, defaults to 1):
831
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
832
+ every step.
833
+ cross_attention_kwargs (`dict`, *optional*):
834
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
835
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
836
+ clip_skip (`int`, *optional*):
837
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
838
+ the output of the pre-final layer will be used for computing the prompt embeddings.
839
+ latent_interpolation_method (`str` or `Callable[[torch.Tensor, torch.Tensor, int], torch.Tensor]]`, *optional*):
840
+ Must be one of "lerp", "slerp" or a callable that takes in a random noisy latent, image latent and a frame index
841
+ as input and returns an initial latent for sampling.
842
+ Examples:
843
+
844
+ Returns:
845
+ [`AnimateDiffImgToVideoPipelineOutput`] or `tuple`:
846
+ If `return_dict` is `True`, [`AnimateDiffImgToVideoPipelineOutput`] is
847
+ returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
848
+ """
849
+ # 0. Default height and width to unet
850
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
851
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
852
+
853
+ num_videos_per_prompt = 1
854
+
855
+ # 1. Check inputs. Raise error if not correct
856
+ self.check_inputs(
857
+ prompt=prompt,
858
+ height=height,
859
+ width=width,
860
+ callback_steps=callback_steps,
861
+ negative_prompt=negative_prompt,
862
+ prompt_embeds=prompt_embeds,
863
+ negative_prompt_embeds=negative_prompt_embeds,
864
+ latent_interpolation_method=latent_interpolation_method,
865
+ )
866
+
867
+ # 2. Define call parameters
868
+ if prompt is not None and isinstance(prompt, str):
869
+ batch_size = 1
870
+ elif prompt is not None and isinstance(prompt, list):
871
+ batch_size = len(prompt)
872
+ else:
873
+ batch_size = prompt_embeds.shape[0]
874
+
875
+ device = self._execution_device
876
+
877
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
878
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
879
+ # corresponds to doing no classifier free guidance.
880
+ do_classifier_free_guidance = guidance_scale > 1.0
881
+
882
+ # 3. Encode input prompt
883
+ text_encoder_lora_scale = (
884
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
885
+ )
886
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
887
+ prompt,
888
+ device,
889
+ num_videos_per_prompt,
890
+ do_classifier_free_guidance,
891
+ negative_prompt,
892
+ prompt_embeds=prompt_embeds,
893
+ negative_prompt_embeds=negative_prompt_embeds,
894
+ lora_scale=text_encoder_lora_scale,
895
+ clip_skip=clip_skip,
896
+ )
897
+
898
+ # For classifier free guidance, we need to do two forward passes.
899
+ # Here we concatenate the unconditional and text embeddings into a single batch
900
+ # to avoid doing two forward passes
901
+ if do_classifier_free_guidance:
902
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
903
+
904
+ if ip_adapter_image is not None:
905
+ output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True
906
+ image_embeds, negative_image_embeds = self.encode_image(
907
+ ip_adapter_image, device, num_videos_per_prompt, output_hidden_state
908
+ )
909
+ if do_classifier_free_guidance:
910
+ image_embeds = torch.cat([negative_image_embeds, image_embeds])
911
+
912
+ # 4. Preprocess image
913
+ image = self.image_processor.preprocess(image, height=height, width=width)
914
+
915
+ # 5. Prepare timesteps
916
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
917
+
918
+ # 6. Prepare latent variables
919
+ num_channels_latents = self.unet.config.in_channels
920
+ latents = self.prepare_latents(
921
+ image=image,
922
+ strength=strength,
923
+ batch_size=batch_size * num_videos_per_prompt,
924
+ num_channels_latents=num_channels_latents,
925
+ num_frames=num_frames,
926
+ height=height,
927
+ width=width,
928
+ dtype=prompt_embeds.dtype,
929
+ device=device,
930
+ generator=generator,
931
+ latents=latents,
932
+ latent_interpolation_method=latent_interpolation_method,
933
+ )
934
+
935
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
936
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
937
+
938
+ # 8. Add image embeds for IP-Adapter
939
+ added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else None
940
+
941
+ # 9. Denoising loop
942
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
943
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
944
+ for i, t in enumerate(timesteps):
945
+ # expand the latents if we are doing classifier free guidance
946
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
947
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
948
+
949
+ # predict the noise residual
950
+ noise_pred = self.unet(
951
+ latent_model_input,
952
+ t,
953
+ encoder_hidden_states=prompt_embeds,
954
+ cross_attention_kwargs=cross_attention_kwargs,
955
+ added_cond_kwargs=added_cond_kwargs,
956
+ ).sample
957
+
958
+ # perform guidance
959
+ if do_classifier_free_guidance:
960
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
961
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
962
+
963
+ # compute the previous noisy sample x_t -> x_t-1
964
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
965
+
966
+ # call the callback, if provided
967
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
968
+ progress_bar.update()
969
+ if callback is not None and i % callback_steps == 0:
970
+ callback(i, t, latents)
971
+
972
+ if output_type == "latent":
973
+ return AnimateDiffImgToVideoPipelineOutput(frames=latents)
974
+
975
+ # 10. Post-processing
976
+ video_tensor = self.decode_latents(latents)
977
+
978
+ if output_type == "pt":
979
+ video = video_tensor
980
+ else:
981
+ video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
982
+
983
+ # 11. Offload all models
984
+ self.maybe_free_model_hooks()
985
+
986
+ if not return_dict:
987
+ return (video,)
988
+
989
+ return AnimateDiffImgToVideoPipelineOutput(frames=video)
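Besides `"lerp"` and `"slerp"`, `latent_interpolation_method` also accepts a plain function with the signature `(random_latent, image_latent, frame_index) -> latent`, as `check_inputs` above allows. A hedged sketch reusing `pipe` and `image` from the example docstring; the schedule itself is made up for illustration:

```py
import torch

def anchor_first_frame(noise: torch.Tensor, image_latent: torch.Tensor, frame_index: int) -> torch.Tensor:
    # Hypothetical schedule: start the first frame from the encoded image, keep the rest as pure noise.
    return image_latent if frame_index == 0 else noise

output = pipe(
    image=image,
    prompt="A snail moving on the ground",
    strength=0.8,
    latent_interpolation_method=anchor_first_frame,
)
frames = output.frames[0]
```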
v0.26.3/pipeline_demofusion_sdxl.py ADDED
@@ -0,0 +1,1414 @@
1
+ import inspect
2
+ import os
3
+ import random
4
+ import warnings
5
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
6
+
7
+ import matplotlib.pyplot as plt
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
11
+
12
+ from diffusers.image_processor import VaeImageProcessor
13
+ from diffusers.loaders import (
14
+ FromSingleFileMixin,
15
+ LoraLoaderMixin,
16
+ TextualInversionLoaderMixin,
17
+ )
18
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
19
+ from diffusers.models.attention_processor import (
20
+ AttnProcessor2_0,
21
+ LoRAAttnProcessor2_0,
22
+ LoRAXFormersAttnProcessor,
23
+ XFormersAttnProcessor,
24
+ )
25
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
26
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
27
+ from diffusers.schedulers import KarrasDiffusionSchedulers
28
+ from diffusers.utils import (
29
+ is_accelerate_available,
30
+ is_accelerate_version,
31
+ is_invisible_watermark_available,
32
+ logging,
33
+ replace_example_docstring,
34
+ )
35
+ from diffusers.utils.torch_utils import randn_tensor
36
+
37
+
38
+ if is_invisible_watermark_available():
39
+ from diffusers.pipelines.stable_diffusion_xl.watermark import (
40
+ StableDiffusionXLWatermarker,
41
+ )
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+ EXAMPLE_DOC_STRING = """
47
+ Examples:
48
+ ```py
49
+ >>> import torch
50
+ >>> from diffusers import StableDiffusionXLPipeline
51
+
52
+ >>> pipe = StableDiffusionXLPipeline.from_pretrained(
53
+ ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
54
+ ... )
55
+ >>> pipe = pipe.to("cuda")
56
+
57
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
58
+ >>> image = pipe(prompt).images[0]
59
+ ```
60
+ """
61
+
62
+
63
+ def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3):
64
+ x_coord = torch.arange(kernel_size)
65
+ gaussian_1d = torch.exp(-((x_coord - (kernel_size - 1) / 2) ** 2) / (2 * sigma**2))
66
+ gaussian_1d = gaussian_1d / gaussian_1d.sum()
67
+ gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :]
68
+ kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1)
69
+
70
+ return kernel
71
+
72
+
73
+ def gaussian_filter(latents, kernel_size=3, sigma=1.0):
74
+ channels = latents.shape[1]
75
+ kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype)
76
+ blurred_latents = F.conv2d(latents, kernel, padding=kernel_size // 2, groups=channels)
77
+
78
+ return blurred_latents
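A minimal sketch of calling the depthwise Gaussian blur helper above on a latent batch (shapes are illustrative only):

```py
import torch

latents = torch.randn(1, 4, 128, 128)  # e.g. SDXL latents for a 1024x1024 image
blurred = gaussian_filter(latents, kernel_size=5, sigma=2.0)
print(blurred.shape)  # torch.Size([1, 4, 128, 128])
```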
79
+
80
+
81
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
82
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
83
+ """
84
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
85
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
86
+ """
87
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
88
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
89
+ # rescale the results from guidance (fixes overexposure)
90
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
91
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
92
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
93
+ return noise_cfg
94
+
95
+
96
+ class DemoFusionSDXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin):
97
+ r"""
98
+ Pipeline for text-to-image generation using Stable Diffusion XL.
99
+
100
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
101
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
102
+
103
+ In addition the pipeline inherits the following loading methods:
104
+ - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
105
+ - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]
106
+
107
+ as well as the following saving methods:
108
+ - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]
109
+
110
+ Args:
111
+ vae ([`AutoencoderKL`]):
112
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
113
+ text_encoder ([`CLIPTextModel`]):
114
+ Frozen text-encoder. Stable Diffusion XL uses the text portion of
115
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
116
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
117
+ text_encoder_2 ([` CLIPTextModelWithProjection`]):
118
+ Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
119
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
120
+ specifically the
121
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
122
+ variant.
123
+ tokenizer (`CLIPTokenizer`):
124
+ Tokenizer of class
125
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
126
+ tokenizer_2 (`CLIPTokenizer`):
127
+ Second Tokenizer of class
128
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
129
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
130
+ scheduler ([`SchedulerMixin`]):
131
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
132
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
133
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
134
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
135
+ `stabilityai/stable-diffusion-xl-base-1-0`.
136
+ add_watermarker (`bool`, *optional*):
137
+ Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
138
+ watermark output images. If not defined, it will default to True if the package is installed, otherwise no
139
+ watermarker will be used.
140
+ """
141
+
142
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
143
+
144
+ def __init__(
145
+ self,
146
+ vae: AutoencoderKL,
147
+ text_encoder: CLIPTextModel,
148
+ text_encoder_2: CLIPTextModelWithProjection,
149
+ tokenizer: CLIPTokenizer,
150
+ tokenizer_2: CLIPTokenizer,
151
+ unet: UNet2DConditionModel,
152
+ scheduler: KarrasDiffusionSchedulers,
153
+ force_zeros_for_empty_prompt: bool = True,
154
+ add_watermarker: Optional[bool] = None,
155
+ ):
156
+ super().__init__()
157
+
158
+ self.register_modules(
159
+ vae=vae,
160
+ text_encoder=text_encoder,
161
+ text_encoder_2=text_encoder_2,
162
+ tokenizer=tokenizer,
163
+ tokenizer_2=tokenizer_2,
164
+ unet=unet,
165
+ scheduler=scheduler,
166
+ )
167
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
168
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
169
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
170
+ self.default_sample_size = self.unet.config.sample_size
171
+
172
+ add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
173
+
174
+ if add_watermarker:
175
+ self.watermark = StableDiffusionXLWatermarker()
176
+ else:
177
+ self.watermark = None
178
+
179
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
180
+ def enable_vae_slicing(self):
181
+ r"""
182
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
183
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
184
+ """
185
+ self.vae.enable_slicing()
186
+
187
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
188
+ def disable_vae_slicing(self):
189
+ r"""
190
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
191
+ computing decoding in one step.
192
+ """
193
+ self.vae.disable_slicing()
194
+
195
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
196
+ def enable_vae_tiling(self):
197
+ r"""
198
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
199
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
200
+ processing larger images.
201
+ """
202
+ self.vae.enable_tiling()
203
+
204
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
205
+ def disable_vae_tiling(self):
206
+ r"""
207
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
208
+ computing decoding in one step.
209
+ """
210
+ self.vae.disable_tiling()
211
+
212
+ def encode_prompt(
213
+ self,
214
+ prompt: str,
215
+ prompt_2: Optional[str] = None,
216
+ device: Optional[torch.device] = None,
217
+ num_images_per_prompt: int = 1,
218
+ do_classifier_free_guidance: bool = True,
219
+ negative_prompt: Optional[str] = None,
220
+ negative_prompt_2: Optional[str] = None,
221
+ prompt_embeds: Optional[torch.FloatTensor] = None,
222
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
223
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
224
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
225
+ lora_scale: Optional[float] = None,
226
+ ):
227
+ r"""
228
+ Encodes the prompt into text encoder hidden states.
229
+
230
+ Args:
231
+ prompt (`str` or `List[str]`, *optional*):
232
+ prompt to be encoded
233
+ prompt_2 (`str` or `List[str]`, *optional*):
234
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
235
+ used in both text-encoders
236
+ device (`torch.device`):
237
+ torch device
238
+ num_images_per_prompt (`int`):
239
+ number of images that should be generated per prompt
240
+ do_classifier_free_guidance (`bool`):
241
+ whether to use classifier free guidance or not
242
+ negative_prompt (`str` or `List[str]`, *optional*):
243
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
244
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
245
+ less than `1`).
246
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
247
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
248
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
249
+ prompt_embeds (`torch.FloatTensor`, *optional*):
250
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
251
+ provided, text embeddings will be generated from `prompt` input argument.
252
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
253
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
254
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
255
+ argument.
256
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
257
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
258
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
259
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
260
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
261
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
262
+ input argument.
263
+ lora_scale (`float`, *optional*):
264
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
265
+ """
266
+ device = device or self._execution_device
267
+
268
+ # set lora scale so that monkey patched LoRA
269
+ # function of text encoder can correctly access it
270
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
271
+ self._lora_scale = lora_scale
272
+
273
+ # dynamically adjust the LoRA scale
274
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
275
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
276
+
277
+ if prompt is not None and isinstance(prompt, str):
278
+ batch_size = 1
279
+ elif prompt is not None and isinstance(prompt, list):
280
+ batch_size = len(prompt)
281
+ else:
282
+ batch_size = prompt_embeds.shape[0]
283
+
284
+ # Define tokenizers and text encoders
285
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
286
+ text_encoders = (
287
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
288
+ )
289
+
290
+ if prompt_embeds is None:
291
+ prompt_2 = prompt_2 or prompt
292
+ # textual inversion: process multi-vector tokens if necessary
293
+ prompt_embeds_list = []
294
+ prompts = [prompt, prompt_2]
295
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
296
+ if isinstance(self, TextualInversionLoaderMixin):
297
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
298
+
299
+ text_inputs = tokenizer(
300
+ prompt,
301
+ padding="max_length",
302
+ max_length=tokenizer.model_max_length,
303
+ truncation=True,
304
+ return_tensors="pt",
305
+ )
306
+
307
+ text_input_ids = text_inputs.input_ids
308
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
309
+
310
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
311
+ text_input_ids, untruncated_ids
312
+ ):
313
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
314
+ logger.warning(
315
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
316
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
317
+ )
318
+
319
+ prompt_embeds = text_encoder(
320
+ text_input_ids.to(device),
321
+ output_hidden_states=True,
322
+ )
323
+
324
+ # We are only ALWAYS interested in the pooled output of the final text encoder
325
+ pooled_prompt_embeds = prompt_embeds[0]
326
+ prompt_embeds = prompt_embeds.hidden_states[-2]
327
+
328
+ prompt_embeds_list.append(prompt_embeds)
329
+
330
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
331
+
332
+ # get unconditional embeddings for classifier free guidance
333
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
334
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
335
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
336
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
337
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
338
+ negative_prompt = negative_prompt or ""
339
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
340
+
341
+ uncond_tokens: List[str]
342
+ if prompt is not None and type(prompt) is not type(negative_prompt):
343
+ raise TypeError(
344
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
345
+ f" {type(prompt)}."
346
+ )
347
+ elif isinstance(negative_prompt, str):
348
+ uncond_tokens = [negative_prompt, negative_prompt_2]
349
+ elif batch_size != len(negative_prompt):
350
+ raise ValueError(
351
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
352
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
353
+ " the batch size of `prompt`."
354
+ )
355
+ else:
356
+ uncond_tokens = [negative_prompt, negative_prompt_2]
357
+
358
+ negative_prompt_embeds_list = []
359
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
360
+ if isinstance(self, TextualInversionLoaderMixin):
361
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
362
+
363
+ max_length = prompt_embeds.shape[1]
364
+ uncond_input = tokenizer(
365
+ negative_prompt,
366
+ padding="max_length",
367
+ max_length=max_length,
368
+ truncation=True,
369
+ return_tensors="pt",
370
+ )
371
+
372
+ negative_prompt_embeds = text_encoder(
373
+ uncond_input.input_ids.to(device),
374
+ output_hidden_states=True,
375
+ )
376
+ # We are only ALWAYS interested in the pooled output of the final text encoder
377
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
378
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
379
+
380
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
381
+
382
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
383
+
384
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
385
+ bs_embed, seq_len, _ = prompt_embeds.shape
386
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
387
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
388
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
389
+
390
+ if do_classifier_free_guidance:
391
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
392
+ seq_len = negative_prompt_embeds.shape[1]
393
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
394
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
395
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
396
+
397
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
398
+ bs_embed * num_images_per_prompt, -1
399
+ )
400
+ if do_classifier_free_guidance:
401
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
402
+ bs_embed * num_images_per_prompt, -1
403
+ )
404
+
405
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
406
+
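+ # Shape sketch for `encode_prompt` (a hypothetical `pipe` handle; assumes the standard SDXL
+ # text encoders, CLIP ViT-L with 768-dim and OpenCLIP ViT-bigG with 1280-dim hidden states,
+ # concatenated to 2048, and the default 77-token context):
+ #
+ #   >>> pe, npe, ppe, nppe = pipe.encode_prompt("a photo of a cat", device=pipe.device)
+ #   >>> pe.shape, ppe.shape
+ #   (torch.Size([1, 77, 2048]), torch.Size([1, 1280]))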
407
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
408
+ def prepare_extra_step_kwargs(self, generator, eta):
409
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
410
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
411
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
412
+ # and should be between [0, 1]
413
+
414
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
415
+ extra_step_kwargs = {}
416
+ if accepts_eta:
417
+ extra_step_kwargs["eta"] = eta
418
+
419
+ # check if the scheduler accepts generator
420
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
421
+ if accepts_generator:
422
+ extra_step_kwargs["generator"] = generator
423
+ return extra_step_kwargs
424
+
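+ # Illustrative check of the signature inspection used above (e.g. `DDIMScheduler.step`
+ # accepts both `eta` and `generator`, so both keys would be forwarded, while a scheduler
+ # without an `eta` argument would only receive `generator`):
+ #
+ #   >>> import inspect
+ #   >>> from diffusers import DDIMScheduler
+ #   >>> "eta" in inspect.signature(DDIMScheduler().step).parameters
+ #   True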
425
+ def check_inputs(
426
+ self,
427
+ prompt,
428
+ prompt_2,
429
+ height,
430
+ width,
431
+ callback_steps,
432
+ negative_prompt=None,
433
+ negative_prompt_2=None,
434
+ prompt_embeds=None,
435
+ negative_prompt_embeds=None,
436
+ pooled_prompt_embeds=None,
437
+ negative_pooled_prompt_embeds=None,
438
+ num_images_per_prompt=None,
439
+ ):
440
+ if height % 8 != 0 or width % 8 != 0:
441
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
442
+
443
+ if (callback_steps is None) or (
444
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
445
+ ):
446
+ raise ValueError(
447
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
448
+ f" {type(callback_steps)}."
449
+ )
450
+
451
+ if prompt is not None and prompt_embeds is not None:
452
+ raise ValueError(
453
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
454
+ " only forward one of the two."
455
+ )
456
+ elif prompt_2 is not None and prompt_embeds is not None:
457
+ raise ValueError(
458
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
459
+ " only forward one of the two."
460
+ )
461
+ elif prompt is None and prompt_embeds is None:
462
+ raise ValueError(
463
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
464
+ )
465
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
466
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
467
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
468
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
469
+
470
+ if negative_prompt is not None and negative_prompt_embeds is not None:
471
+ raise ValueError(
472
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
473
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
474
+ )
475
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
476
+ raise ValueError(
477
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
478
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
479
+ )
480
+
481
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
482
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
483
+ raise ValueError(
484
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
485
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
486
+ f" {negative_prompt_embeds.shape}."
487
+ )
488
+
489
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
490
+ raise ValueError(
491
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
492
+ )
493
+
494
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
495
+ raise ValueError(
496
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
497
+ )
498
+
499
+ # DemoFusion specific checks
500
+ if max(height, width) % 1024 != 0:
501
+ raise ValueError(
502
+ f"the larger one of `height` and `width` has to be divisible by 1024 but are {height} and {width}."
503
+ )
504
+
505
+ if num_images_per_prompt != 1:
506
+ warnings.warn("num_images_per_prompt != 1 is not supported by DemoFusion and will be ignored.")
507
+ num_images_per_prompt = 1
508
+
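+ # Example of the DemoFusion-specific resolution check above: 3072x2048 passes because the
+ # larger side is a multiple of 1024, while 3000x2000 raises a ValueError:
+ #
+ #   >>> max(3072, 2048) % 1024 == 0
+ #   True
+ #   >>> max(3000, 2000) % 1024 == 0
+ #   False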
509
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
510
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
511
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
512
+ if isinstance(generator, list) and len(generator) != batch_size:
513
+ raise ValueError(
514
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
515
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
516
+ )
517
+
518
+ if latents is None:
519
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
520
+ else:
521
+ latents = latents.to(device)
522
+
523
+ # scale the initial noise by the standard deviation required by the scheduler
524
+ latents = latents * self.scheduler.init_noise_sigma
525
+ return latents
526
+
527
+ def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
528
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
529
+
530
+ passed_add_embed_dim = (
531
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
532
+ )
533
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
534
+
535
+ if expected_add_embed_dim != passed_add_embed_dim:
536
+ raise ValueError(
537
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
538
+ )
539
+
540
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
541
+ return add_time_ids
542
+
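+ # The added time ids are simply the six SDXL micro-conditioning integers concatenated and
+ # wrapped into a (1, 6) tensor, e.g. for a 1024x1024 original size, (0, 0) crop and a
+ # 1024x1024 target:
+ #
+ #   >>> list((1024, 1024) + (0, 0) + (1024, 1024))
+ #   [1024, 1024, 0, 0, 1024, 1024]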
543
+ def get_views(self, height, width, window_size=128, stride=64, random_jitter=False):
544
+ height //= self.vae_scale_factor
545
+ width //= self.vae_scale_factor
546
+ num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1
547
+ num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1
548
+ total_num_blocks = int(num_blocks_height * num_blocks_width)
549
+ views = []
550
+ for i in range(total_num_blocks):
551
+ h_start = int((i // num_blocks_width) * stride)
552
+ h_end = h_start + window_size
553
+ w_start = int((i % num_blocks_width) * stride)
554
+ w_end = w_start + window_size
555
+
556
+ if h_end > height:
557
+ h_start = int(h_start + height - h_end)
558
+ h_end = int(height)
559
+ if w_end > width:
560
+ w_start = int(w_start + width - w_end)
561
+ w_end = int(width)
562
+ if h_start < 0:
563
+ h_end = int(h_end - h_start)
564
+ h_start = 0
565
+ if w_start < 0:
566
+ w_end = int(w_end - w_start)
567
+ w_start = 0
568
+
569
+ if random_jitter:
570
+ jitter_range = (window_size - stride) // 4
571
+ w_jitter = 0
572
+ h_jitter = 0
573
+ if (w_start != 0) and (w_end != width):
574
+ w_jitter = random.randint(-jitter_range, jitter_range)
575
+ elif (w_start == 0) and (w_end != width):
576
+ w_jitter = random.randint(-jitter_range, 0)
577
+ elif (w_start != 0) and (w_end == width):
578
+ w_jitter = random.randint(0, jitter_range)
579
+ if (h_start != 0) and (h_end != height):
580
+ h_jitter = random.randint(-jitter_range, jitter_range)
581
+ elif (h_start == 0) and (h_end != height):
582
+ h_jitter = random.randint(-jitter_range, 0)
583
+ elif (h_start != 0) and (h_end == height):
584
+ h_jitter = random.randint(0, jitter_range)
585
+ h_start += h_jitter + jitter_range
586
+ h_end += h_jitter + jitter_range
587
+ w_start += w_jitter + jitter_range
588
+ w_end += w_jitter + jitter_range
589
+
590
+ views.append((h_start, h_end, w_start, w_end))
591
+ return views
592
+
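+ # Worked example for `get_views` (a hypothetical `pipe` handle, no jitter): a 2048x2048
+ # image with vae_scale_factor 8 gives a 256x256 latent grid, and the default 128-latent
+ # window with stride 64 yields a 3x3 grid of overlapping views:
+ #
+ #   >>> views = pipe.get_views(2048, 2048, window_size=128, stride=64)
+ #   >>> len(views), views[0]
+ #   (9, (0, 128, 0, 128))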
593
+ def tiled_decode(self, latents, current_height, current_width):
594
+ core_size = self.unet.config.sample_size // 4
595
+ core_stride = core_size
596
+ pad_size = self.unet.config.sample_size // 4 * 3
597
+ decoder_view_batch_size = 1
598
+
599
+ views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size)
600
+ views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)]
601
+ latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), "constant", 0)
602
+ image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device)
603
+ count = torch.zeros_like(image).to(latents.device)
604
+ # get the latents corresponding to the current view coordinates
605
+ with self.progress_bar(total=len(views_batch)) as progress_bar:
606
+ for j, batch_view in enumerate(views_batch):
607
+ len(batch_view)
608
+ latents_for_view = torch.cat(
609
+ [
610
+ latents_[:, :, h_start : h_end + pad_size * 2, w_start : w_end + pad_size * 2]
611
+ for h_start, h_end, w_start, w_end in batch_view
612
+ ]
613
+ )
614
+ image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0]
615
+ h_start, h_end, w_start, w_end = views[j]
616
+ h_start, h_end, w_start, w_end = (
617
+ h_start * self.vae_scale_factor,
618
+ h_end * self.vae_scale_factor,
619
+ w_start * self.vae_scale_factor,
620
+ w_end * self.vae_scale_factor,
621
+ )
622
+ p_h_start, p_h_end, p_w_start, p_w_end = (
623
+ pad_size * self.vae_scale_factor,
624
+ image_patch.size(2) - pad_size * self.vae_scale_factor,
625
+ pad_size * self.vae_scale_factor,
626
+ image_patch.size(3) - pad_size * self.vae_scale_factor,
627
+ )
628
+ image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end]
629
+ count[:, :, h_start:h_end, w_start:w_end] += 1
630
+ progress_bar.update()
631
+ image = image / count
632
+
633
+ return image
634
+
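+ # Sketch of `tiled_decode` (hypothetical `pipe` and `latents`; assumes the standard SDXL
+ # UNet sample_size of 128, i.e. 32x32 latent core tiles padded by 96 latent pixels per
+ # side, decoded independently and averaged back into the full image):
+ #
+ #   >>> latents = torch.randn(1, 4, 512, 512, device=pipe.device, dtype=pipe.vae.dtype)
+ #   >>> pipe.tiled_decode(latents, 4096, 4096).shape
+ #   torch.Size([1, 3, 4096, 4096])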
635
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
636
+ def upcast_vae(self):
637
+ dtype = self.vae.dtype
638
+ self.vae.to(dtype=torch.float32)
639
+ use_torch_2_0_or_xformers = isinstance(
640
+ self.vae.decoder.mid_block.attentions[0].processor,
641
+ (
642
+ AttnProcessor2_0,
643
+ XFormersAttnProcessor,
644
+ LoRAXFormersAttnProcessor,
645
+ LoRAAttnProcessor2_0,
646
+ ),
647
+ )
648
+ # if xformers or torch_2_0 is used attention block does not need
649
+ # to be in float32 which can save lots of memory
650
+ if use_torch_2_0_or_xformers:
651
+ self.vae.post_quant_conv.to(dtype)
652
+ self.vae.decoder.conv_in.to(dtype)
653
+ self.vae.decoder.mid_block.to(dtype)
654
+
655
+ @torch.no_grad()
656
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
657
+ def __call__(
658
+ self,
659
+ prompt: Union[str, List[str]] = None,
660
+ prompt_2: Optional[Union[str, List[str]]] = None,
661
+ height: Optional[int] = None,
662
+ width: Optional[int] = None,
663
+ num_inference_steps: int = 50,
664
+ denoising_end: Optional[float] = None,
665
+ guidance_scale: float = 5.0,
666
+ negative_prompt: Optional[Union[str, List[str]]] = None,
667
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
668
+ num_images_per_prompt: Optional[int] = 1,
669
+ eta: float = 0.0,
670
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
671
+ latents: Optional[torch.FloatTensor] = None,
672
+ prompt_embeds: Optional[torch.FloatTensor] = None,
673
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
674
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
675
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
676
+ output_type: Optional[str] = "pil",
677
+ return_dict: bool = False,
678
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
679
+ callback_steps: int = 1,
680
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
681
+ guidance_rescale: float = 0.0,
682
+ original_size: Optional[Tuple[int, int]] = None,
683
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
684
+ target_size: Optional[Tuple[int, int]] = None,
685
+ negative_original_size: Optional[Tuple[int, int]] = None,
686
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
687
+ negative_target_size: Optional[Tuple[int, int]] = None,
688
+ ################### DemoFusion specific parameters ####################
689
+ view_batch_size: int = 16,
690
+ multi_decoder: bool = True,
691
+ stride: Optional[int] = 64,
692
+ cosine_scale_1: Optional[float] = 3.0,
693
+ cosine_scale_2: Optional[float] = 1.0,
694
+ cosine_scale_3: Optional[float] = 1.0,
695
+ sigma: Optional[float] = 0.8,
696
+ show_image: bool = False,
697
+ ):
698
+ r"""
699
+ Function invoked when calling the pipeline for generation.
700
+
701
+ Args:
702
+ prompt (`str` or `List[str]`, *optional*):
703
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
704
+ instead.
705
+ prompt_2 (`str` or `List[str]`, *optional*):
706
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
707
+ used in both text-encoders
708
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
709
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
710
+ Anything below 512 pixels won't work well for
711
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
712
+ and checkpoints that are not specifically fine-tuned on low resolutions.
713
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
714
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
715
+ Anything below 512 pixels won't work well for
716
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
717
+ and checkpoints that are not specifically fine-tuned on low resolutions.
718
+ num_inference_steps (`int`, *optional*, defaults to 50):
719
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
720
+ expense of slower inference.
721
+ denoising_end (`float`, *optional*):
722
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
723
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
724
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
725
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
726
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
727
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
728
+ guidance_scale (`float`, *optional*, defaults to 5.0):
729
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
730
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
731
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
732
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
733
+ usually at the expense of lower image quality.
734
+ negative_prompt (`str` or `List[str]`, *optional*):
735
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
736
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
737
+ less than `1`).
738
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
739
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
740
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
741
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
742
+ The number of images to generate per prompt.
743
+ eta (`float`, *optional*, defaults to 0.0):
744
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
745
+ [`schedulers.DDIMScheduler`], will be ignored for others.
746
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
747
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
748
+ to make generation deterministic.
749
+ latents (`torch.FloatTensor`, *optional*):
750
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
751
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
752
+ tensor will be generated by sampling using the supplied random `generator`.
753
+ prompt_embeds (`torch.FloatTensor`, *optional*):
754
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
755
+ provided, text embeddings will be generated from `prompt` input argument.
756
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
757
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
758
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
759
+ argument.
760
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
761
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
762
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
763
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
764
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
765
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
766
+ input argument.
767
+ output_type (`str`, *optional*, defaults to `"pil"`):
768
+ The output format of the generated image. Choose between
769
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
770
+ return_dict (`bool`, *optional*, defaults to `False`):
771
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
772
+ of a plain tuple.
773
+ callback (`Callable`, *optional*):
774
+ A function that will be called every `callback_steps` steps during inference. The function will be
775
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
776
+ callback_steps (`int`, *optional*, defaults to 1):
777
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
778
+ called at every step.
779
+ cross_attention_kwargs (`dict`, *optional*):
780
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
781
+ `self.processor` in
782
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
783
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
784
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
785
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
786
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
787
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
788
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
789
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
790
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
791
+ explained in section 2.2 of
792
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
793
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
794
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
795
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
796
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
797
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
798
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
799
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
800
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
801
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
802
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
803
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
804
+ micro-conditioning as explained in section 2.2 of
805
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
806
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
807
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
808
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
809
+ micro-conditioning as explained in section 2.2 of
810
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
811
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
812
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
813
+ To negatively condition the generation process based on a target image resolution. It should be the same
814
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
815
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
816
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
817
+ ################### DemoFusion specific parameters ####################
818
+ view_batch_size (`int`, defaults to 16):
819
+ The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
820
+ efficiency but comes with increased GPU memory requirements.
821
+ multi_decoder (`bool`, defaults to True):
822
+ Determine whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072,
823
+ a tiled decoder becomes necessary.
824
+ stride (`int`, defaults to 64):
825
+ The stride of moving local patches. A smaller stride is better for alleviating seam issues,
826
+ but it also introduces additional computational overhead and inference time.
827
+ cosine_scale_1 (`float`, defaults to 3):
828
+ Control the strength of skip-residual. For specific impacts, please refer to Appendix C
829
+ in the DemoFusion paper.
830
+ cosine_scale_2 (`float`, defaults to 1):
831
+ Control the strength of dilated sampling. For specific impacts, please refer to Appendix C
832
+ in the DemoFusion paper.
833
+ cosine_scale_3 (`float`, defaults to 1):
834
+ Control the strength of the gaussian filter. For specific impacts, please refer to Appendix C
835
+ in the DemoFusion paper.
836
+ sigma (`float`, defaults to 0.8):
837
+ The standard deviation of the gaussian filter.
838
+ show_image (`bool`, defaults to False):
839
+ Determine whether to show intermediate results during generation.
840
+
841
+ Examples:
842
+
843
+ Returns:
844
+ a `list` with the generated images at each phase.
845
+ """
846
+
847
+ # 0. Default height and width to unet
848
+ height = height or self.default_sample_size * self.vae_scale_factor
849
+ width = width or self.default_sample_size * self.vae_scale_factor
850
+
851
+ x1_size = self.default_sample_size * self.vae_scale_factor
852
+
853
+ height_scale = height / x1_size
854
+ width_scale = width / x1_size
855
+ scale_num = int(max(height_scale, width_scale))
856
+ aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)
857
+
858
+ original_size = original_size or (height, width)
859
+ target_size = target_size or (height, width)
860
+
861
+ # 1. Check inputs. Raise error if not correct
862
+ self.check_inputs(
863
+ prompt,
864
+ prompt_2,
865
+ height,
866
+ width,
867
+ callback_steps,
868
+ negative_prompt,
869
+ negative_prompt_2,
870
+ prompt_embeds,
871
+ negative_prompt_embeds,
872
+ pooled_prompt_embeds,
873
+ negative_pooled_prompt_embeds,
874
+ num_images_per_prompt,
875
+ )
876
+
877
+ # 2. Define call parameters
878
+ if prompt is not None and isinstance(prompt, str):
879
+ batch_size = 1
880
+ elif prompt is not None and isinstance(prompt, list):
881
+ batch_size = len(prompt)
882
+ else:
883
+ batch_size = prompt_embeds.shape[0]
884
+
885
+ device = self._execution_device
886
+
887
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
888
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
889
+ # corresponds to doing no classifier free guidance.
890
+ do_classifier_free_guidance = guidance_scale > 1.0
891
+
892
+ # 3. Encode input prompt
893
+ text_encoder_lora_scale = (
894
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
895
+ )
896
+ (
897
+ prompt_embeds,
898
+ negative_prompt_embeds,
899
+ pooled_prompt_embeds,
900
+ negative_pooled_prompt_embeds,
901
+ ) = self.encode_prompt(
902
+ prompt=prompt,
903
+ prompt_2=prompt_2,
904
+ device=device,
905
+ num_images_per_prompt=num_images_per_prompt,
906
+ do_classifier_free_guidance=do_classifier_free_guidance,
907
+ negative_prompt=negative_prompt,
908
+ negative_prompt_2=negative_prompt_2,
909
+ prompt_embeds=prompt_embeds,
910
+ negative_prompt_embeds=negative_prompt_embeds,
911
+ pooled_prompt_embeds=pooled_prompt_embeds,
912
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
913
+ lora_scale=text_encoder_lora_scale,
914
+ )
915
+
916
+ # 4. Prepare timesteps
917
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
918
+
919
+ timesteps = self.scheduler.timesteps
920
+
921
+ # 5. Prepare latent variables
922
+ num_channels_latents = self.unet.config.in_channels
923
+ latents = self.prepare_latents(
924
+ batch_size * num_images_per_prompt,
925
+ num_channels_latents,
926
+ height // scale_num,
927
+ width // scale_num,
928
+ prompt_embeds.dtype,
929
+ device,
930
+ generator,
931
+ latents,
932
+ )
933
+
934
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
935
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
936
+
937
+ # 7. Prepare added time ids & embeddings
938
+ add_text_embeds = pooled_prompt_embeds
939
+ add_time_ids = self._get_add_time_ids(
940
+ original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
941
+ )
942
+ if negative_original_size is not None and negative_target_size is not None:
943
+ negative_add_time_ids = self._get_add_time_ids(
944
+ negative_original_size,
945
+ negative_crops_coords_top_left,
946
+ negative_target_size,
947
+ dtype=prompt_embeds.dtype,
948
+ )
949
+ else:
950
+ negative_add_time_ids = add_time_ids
951
+
952
+ if do_classifier_free_guidance:
953
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
954
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
955
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
956
+
957
+ prompt_embeds = prompt_embeds.to(device)
958
+ add_text_embeds = add_text_embeds.to(device)
959
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
960
+
961
+ # 8. Denoising loop
962
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
963
+
964
+ # 7.1 Apply denoising_end
965
+ if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
966
+ discrete_timestep_cutoff = int(
967
+ round(
968
+ self.scheduler.config.num_train_timesteps
969
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
970
+ )
971
+ )
972
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
973
+ timesteps = timesteps[:num_inference_steps]
974
+
975
+ output_images = []
976
+
977
+ ############################################################### Phase 1 #################################################################
978
+
979
+ print("### Phase 1 Denoising ###")
980
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
981
+ for i, t in enumerate(timesteps):
982
+ latents_for_view = latents
983
+
984
+ # expand the latents if we are doing classifier free guidance
985
+ latent_model_input = latents.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latents
986
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
987
+
988
+ # predict the noise residual
989
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
990
+ noise_pred = self.unet(
991
+ latent_model_input,
992
+ t,
993
+ encoder_hidden_states=prompt_embeds,
994
+ cross_attention_kwargs=cross_attention_kwargs,
995
+ added_cond_kwargs=added_cond_kwargs,
996
+ return_dict=False,
997
+ )[0]
998
+
999
+ # perform guidance
1000
+ if do_classifier_free_guidance:
1001
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1002
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1003
+
1004
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1005
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1006
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1007
+
1008
+ # compute the previous noisy sample x_t -> x_t-1
1009
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1010
+
1011
+ # call the callback, if provided
1012
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1013
+ progress_bar.update()
1014
+ if callback is not None and i % callback_steps == 0:
1015
+ step_idx = i // getattr(self.scheduler, "order", 1)
1016
+ callback(step_idx, t, latents)
1017
+
1018
+ anchor_mean = latents.mean()
1019
+ anchor_std = latents.std()
1020
+ if not output_type == "latent":
1021
+ # make sure the VAE is in float32 mode, as it overflows in float16
1022
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1023
+
1024
+ if needs_upcasting:
1025
+ self.upcast_vae()
1026
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1027
+ print("### Phase 1 Decoding ###")
1028
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1029
+ # cast back to fp16 if needed
1030
+ if needs_upcasting:
1031
+ self.vae.to(dtype=torch.float16)
1032
+
1033
+ image = self.image_processor.postprocess(image, output_type=output_type)
1034
+ if show_image:
1035
+ plt.figure(figsize=(10, 10))
1036
+ plt.imshow(image[0])
1037
+ plt.axis("off") # Turn off axis numbers and ticks
1038
+ plt.show()
1039
+ output_images.append(image[0])
1040
+
1041
+ ####################################################### Phase 2+ #####################################################
1042
+
1043
+ for current_scale_num in range(2, scale_num + 1):
1044
+ print("### Phase {} Denoising ###".format(current_scale_num))
1045
+ current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1046
+ current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num
1047
+ if height > width:
1048
+ current_width = int(current_width * aspect_ratio)
1049
+ else:
1050
+ current_height = int(current_height * aspect_ratio)
1051
+
1052
+ latents = F.interpolate(
1053
+ latents,
1054
+ size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)),
1055
+ mode="bicubic",
1056
+ )
1057
+
1058
+ noise_latents = []
1059
+ noise = torch.randn_like(latents)
1060
+ for timestep in timesteps:
1061
+ noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0))
1062
+ noise_latents.append(noise_latent)
1063
+ latents = noise_latents[0]
1064
+
1065
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1066
+ for i, t in enumerate(timesteps):
1067
+ count = torch.zeros_like(latents)
1068
+ value = torch.zeros_like(latents)
1069
+ cosine_factor = (
1070
+ 0.5
1071
+ * (
1072
+ 1
1073
+ + torch.cos(
1074
+ torch.pi
1075
+ * (self.scheduler.config.num_train_timesteps - t)
1076
+ / self.scheduler.config.num_train_timesteps
1077
+ )
1078
+ ).cpu()
1079
+ )
1080
+
1081
+ c1 = cosine_factor**cosine_scale_1
1082
+ latents = latents * (1 - c1) + noise_latents[i] * c1
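+ # Skip-residual schedule note: with num_train_timesteps = 1000, at the first step of a
+ # phase (t close to 1000) cosine_factor = 0.5 * (1 + cos(0)) = 1, so c1 = 1 and the latents
+ # are replaced entirely by the noise-injected, upsampled result of the previous phase;
+ # near t = 0, cosine_factor approaches 0.5 * (1 + cos(pi)) = 0 and the skip-residual
+ # vanishes, letting the current phase's own denoising take over.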
1083
+
1084
+ ############################################# MultiDiffusion #############################################
1085
+
1086
+ views = self.get_views(
1087
+ current_height,
1088
+ current_width,
1089
+ stride=stride,
1090
+ window_size=self.unet.config.sample_size,
1091
+ random_jitter=True,
1092
+ )
1093
+ views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
1094
+
1095
+ jitter_range = (self.unet.config.sample_size - stride) // 4
1096
+ latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), "constant", 0)
1097
+
1098
+ count_local = torch.zeros_like(latents_)
1099
+ value_local = torch.zeros_like(latents_)
1100
+
1101
+ for j, batch_view in enumerate(views_batch):
1102
+ vb_size = len(batch_view)
1103
+
1104
+ # get the latents corresponding to the current view coordinates
1105
+ latents_for_view = torch.cat(
1106
+ [
1107
+ latents_[:, :, h_start:h_end, w_start:w_end]
1108
+ for h_start, h_end, w_start, w_end in batch_view
1109
+ ]
1110
+ )
1111
+
1112
+ # expand the latents if we are doing classifier free guidance
1113
+ latent_model_input = latents_for_view
1114
+ latent_model_input = (
1115
+ latent_model_input.repeat_interleave(2, dim=0)
1116
+ if do_classifier_free_guidance
1117
+ else latent_model_input
1118
+ )
1119
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1120
+
1121
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1122
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1123
+ add_time_ids_input = []
1124
+ for h_start, h_end, w_start, w_end in batch_view:
1125
+ add_time_ids_ = add_time_ids.clone()
1126
+ add_time_ids_[:, 2] = h_start * self.vae_scale_factor
1127
+ add_time_ids_[:, 3] = w_start * self.vae_scale_factor
1128
+ add_time_ids_input.append(add_time_ids_)
1129
+ add_time_ids_input = torch.cat(add_time_ids_input)
1130
+
1131
+ # predict the noise residual
1132
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1133
+ noise_pred = self.unet(
1134
+ latent_model_input,
1135
+ t,
1136
+ encoder_hidden_states=prompt_embeds_input,
1137
+ cross_attention_kwargs=cross_attention_kwargs,
1138
+ added_cond_kwargs=added_cond_kwargs,
1139
+ return_dict=False,
1140
+ )[0]
1141
+
1142
+ if do_classifier_free_guidance:
1143
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1144
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1145
+
1146
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1147
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1148
+ noise_pred = rescale_noise_cfg(
1149
+ noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
1150
+ )
1151
+
1152
+ # compute the previous noisy sample x_t -> x_t-1
1153
+ self.scheduler._init_step_index(t)
1154
+ latents_denoised_batch = self.scheduler.step(
1155
+ noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
1156
+ )[0]
1157
+
1158
+ # extract value from batch
1159
+ for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip(
1160
+ latents_denoised_batch.chunk(vb_size), batch_view
1161
+ ):
1162
+ value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised
1163
+ count_local[:, :, h_start:h_end, w_start:w_end] += 1
1164
+
1165
+ value_local = value_local[
1166
+ :,
1167
+ :,
1168
+ jitter_range : jitter_range + current_height // self.vae_scale_factor,
1169
+ jitter_range : jitter_range + current_width // self.vae_scale_factor,
1170
+ ]
1171
+ count_local = count_local[
1172
+ :,
1173
+ :,
1174
+ jitter_range : jitter_range + current_height // self.vae_scale_factor,
1175
+ jitter_range : jitter_range + current_width // self.vae_scale_factor,
1176
+ ]
1177
+
1178
+ c2 = cosine_factor**cosine_scale_2
1179
+
1180
+ value += value_local / count_local * (1 - c2)
1181
+ count += torch.ones_like(value_local) * (1 - c2)
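+ # Fusion weighting note: c2 = cosine_factor**cosine_scale_2 decays from roughly 1 to 0
+ # over the denoising trajectory, so early steps are dominated by the dilated (global)
+ # sampling branch accumulated below with weight c2, while later steps are dominated by
+ # the local MultiDiffusion patches above with weight (1 - c2).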
1182
+
1183
+ ############################################# Dilated Sampling #############################################
1184
+
1185
+ views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)]
1186
+ views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)]
1187
+
1188
+ h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num
1189
+ w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num
1190
+ latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), "constant", 0)
1191
+
1192
+ count_global = torch.zeros_like(latents_)
1193
+ value_global = torch.zeros_like(latents_)
1194
+
1195
+ c3 = 0.99 * cosine_factor**cosine_scale_3 + 1e-2
1196
+ std_, mean_ = latents_.std(), latents_.mean()
1197
+ latents_gaussian = gaussian_filter(
1198
+ latents_, kernel_size=(2 * current_scale_num - 1), sigma=sigma * c3
1199
+ )
1200
+ latents_gaussian = (
1201
+ latents_gaussian - latents_gaussian.mean()
1202
+ ) / latents_gaussian.std() * std_ + mean_
1203
+
1204
+ for j, batch_view in enumerate(views_batch):
1205
+ latents_for_view = torch.cat(
1206
+ [latents_[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
1207
+ )
1208
+ latents_for_view_gaussian = torch.cat(
1209
+ [latents_gaussian[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view]
1210
+ )
1211
+
1212
+ vb_size = latents_for_view.size(0)
1213
+
1214
+ # expand the latents if we are doing classifier free guidance
1215
+ latent_model_input = latents_for_view_gaussian
1216
+ latent_model_input = (
1217
+ latent_model_input.repeat_interleave(2, dim=0)
1218
+ if do_classifier_free_guidance
1219
+ else latent_model_input
1220
+ )
1221
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1222
+
1223
+ prompt_embeds_input = torch.cat([prompt_embeds] * vb_size)
1224
+ add_text_embeds_input = torch.cat([add_text_embeds] * vb_size)
1225
+ add_time_ids_input = torch.cat([add_time_ids] * vb_size)
1226
+
1227
+ # predict the noise residual
1228
+ added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input}
1229
+ noise_pred = self.unet(
1230
+ latent_model_input,
1231
+ t,
1232
+ encoder_hidden_states=prompt_embeds_input,
1233
+ cross_attention_kwargs=cross_attention_kwargs,
1234
+ added_cond_kwargs=added_cond_kwargs,
1235
+ return_dict=False,
1236
+ )[0]
1237
+
1238
+ if do_classifier_free_guidance:
1239
+ noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2]
1240
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1241
+
1242
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1243
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1244
+ noise_pred = rescale_noise_cfg(
1245
+ noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
1246
+ )
1247
+
1248
+ # compute the previous noisy sample x_t -> x_t-1
1249
+ self.scheduler._init_step_index(t)
1250
+ latents_denoised_batch = self.scheduler.step(
1251
+ noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
1252
+ )[0]
1253
+
1254
+ # extract value from batch
1255
+ for latents_view_denoised, (h, w) in zip(latents_denoised_batch.chunk(vb_size), batch_view):
1256
+ value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
1257
+ count_global[:, :, h::current_scale_num, w::current_scale_num] += 1
1258
+
1259
+ c2 = cosine_factor**cosine_scale_2
1260
+
1261
+ value_global = value_global[:, :, h_pad:, w_pad:]
1262
+
1263
+ value += value_global * c2
1264
+ count += torch.ones_like(value_global) * c2
1265
+
1266
+ ###########################################################
1267
+
1268
+ latents = torch.where(count > 0, value / count, value)
1269
+
1270
+ # call the callback, if provided
1271
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1272
+ progress_bar.update()
1273
+ if callback is not None and i % callback_steps == 0:
1274
+ step_idx = i // getattr(self.scheduler, "order", 1)
1275
+ callback(step_idx, t, latents)
1276
+
1277
+ #########################################################################################################################################
1278
+
1279
+ latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean
1280
+ if not output_type == "latent":
1281
+ # make sure the VAE is in float32 mode, as it overflows in float16
1282
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1283
+
1284
+ if needs_upcasting:
1285
+ self.upcast_vae()
1286
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1287
+
1288
+ print("### Phase {} Decoding ###".format(current_scale_num))
1289
+ if multi_decoder:
1290
+ image = self.tiled_decode(latents, current_height, current_width)
1291
+ else:
1292
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1293
+
1294
+ # cast back to fp16 if needed
1295
+ if needs_upcasting:
1296
+ self.vae.to(dtype=torch.float16)
1297
+ else:
1298
+ image = latents
1299
+
1300
+ if not output_type == "latent":
1301
+ image = self.image_processor.postprocess(image, output_type=output_type)
1302
+ if show_image:
1303
+ plt.figure(figsize=(10, 10))
1304
+ plt.imshow(image[0])
1305
+ plt.axis("off") # Turn off axis numbers and ticks
1306
+ plt.show()
1307
+ output_images.append(image[0])
1308
+
1309
+ # Offload all models
1310
+ self.maybe_free_model_hooks()
1311
+
1312
+ return output_images
1313
+
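+ # A hypothetical end-to-end usage sketch (model id, resolution and keyword values are
+ # illustrative assumptions, not defined in this file); `__call__` returns a list with one
+ # image per phase, the last being the full-resolution result:
+ #
+ #   >>> import torch
+ #   >>> from diffusers import DiffusionPipeline
+ #   >>> pipe = DiffusionPipeline.from_pretrained(
+ #   ...     "stabilityai/stable-diffusion-xl-base-1.0",
+ #   ...     custom_pipeline="pipeline_demofusion_sdxl",
+ #   ...     torch_dtype=torch.float16,
+ #   ... ).to("cuda")
+ #   >>> images = pipe(
+ #   ...     "a photo of an astronaut riding a horse",
+ #   ...     height=3072,
+ #   ...     width=3072,
+ #   ...     num_inference_steps=50,
+ #   ...     view_batch_size=16,
+ #   ...     stride=64,
+ #   ...     multi_decoder=True,
+ #   ... )
+ #   >>> images[-1].save("demofusion_3072.png")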
1314
+ # Override to properly handle the loading and unloading of the additional text encoder.
1315
+ def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
1316
+ # We could have accessed the unet config from `lora_state_dict()` too. We pass
1317
+ # it here explicitly to be able to tell that it's coming from an SDXL
1318
+ # pipeline.
1319
+
1320
+ # Remove any existing hooks.
1321
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
1322
+ from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
1323
+ else:
1324
+ raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")
1325
+
1326
+ is_model_cpu_offload = False
1327
+ is_sequential_cpu_offload = False
1328
+ recursive = False
1329
+ for _, component in self.components.items():
1330
+ if isinstance(component, torch.nn.Module):
1331
+ if hasattr(component, "_hf_hook"):
1332
+ is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
1333
+ is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
1334
+ logger.info(
1335
+ "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
1336
+ )
1337
+ recursive = is_sequential_cpu_offload
1338
+ remove_hook_from_module(component, recurse=recursive)
1339
+ state_dict, network_alphas = self.lora_state_dict(
1340
+ pretrained_model_name_or_path_or_dict,
1341
+ unet_config=self.unet.config,
1342
+ **kwargs,
1343
+ )
1344
+ self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)
1345
+
1346
+ text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
1347
+ if len(text_encoder_state_dict) > 0:
1348
+ self.load_lora_into_text_encoder(
1349
+ text_encoder_state_dict,
1350
+ network_alphas=network_alphas,
1351
+ text_encoder=self.text_encoder,
1352
+ prefix="text_encoder",
1353
+ lora_scale=self.lora_scale,
1354
+ )
1355
+
1356
+ text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
1357
+ if len(text_encoder_2_state_dict) > 0:
1358
+ self.load_lora_into_text_encoder(
1359
+ text_encoder_2_state_dict,
1360
+ network_alphas=network_alphas,
1361
+ text_encoder=self.text_encoder_2,
1362
+ prefix="text_encoder_2",
1363
+ lora_scale=self.lora_scale,
1364
+ )
1365
+
1366
+ # Offload back.
1367
+ if is_model_cpu_offload:
1368
+ self.enable_model_cpu_offload()
1369
+ elif is_sequential_cpu_offload:
1370
+ self.enable_sequential_cpu_offload()
1371
+
1372
+ @classmethod
1373
+ def save_lora_weights(
1374
+ self,
1375
+ save_directory: Union[str, os.PathLike],
1376
+ unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1377
+ text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1378
+ text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
1379
+ is_main_process: bool = True,
1380
+ weight_name: str = None,
1381
+ save_function: Callable = None,
1382
+ safe_serialization: bool = True,
1383
+ ):
1384
+ state_dict = {}
1385
+
1386
+ def pack_weights(layers, prefix):
1387
+ layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
1388
+ layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
1389
+ return layers_state_dict
1390
+
1391
+ if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
1392
+ raise ValueError(
1393
+ "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
1394
+ )
1395
+
1396
+ if unet_lora_layers:
1397
+ state_dict.update(pack_weights(unet_lora_layers, "unet"))
1398
+
1399
+ if text_encoder_lora_layers and text_encoder_2_lora_layers:
1400
+ state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
1401
+ state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))
1402
+
1403
+ self.write_lora_layers(
1404
+ state_dict=state_dict,
1405
+ save_directory=save_directory,
1406
+ is_main_process=is_main_process,
1407
+ weight_name=weight_name,
1408
+ save_function=save_function,
1409
+ safe_serialization=safe_serialization,
1410
+ )
1411
+
1412
+ def _remove_text_encoder_monkey_patch(self):
1413
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
1414
+ self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
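+
+ # A minimal, hedged usage sketch for the two LoRA helpers above; the repository id,
+ # output directory and layer dictionaries are illustrative assumptions, not part of this file:
+ #
+ #   pipe.load_lora_weights("some-user/some-sdxl-lora")  # loads UNet and both text encoders
+ #   pipe.save_lora_weights(
+ #       save_directory="./my_lora",
+ #       unet_lora_layers=unet_lora_layers,
+ #       text_encoder_lora_layers=text_encoder_lora_layers,
+ #       text_encoder_2_lora_layers=text_encoder_2_lora_layers,
+ #   )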
v0.26.3/pipeline_fabric.py ADDED
@@ -0,0 +1,751 @@
1
+ # Copyright 2023 FABRIC authors and the HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import List, Optional, Union
15
+
16
+ import torch
17
+ from packaging import version
18
+ from PIL import Image
19
+ from transformers import CLIPTextModel, CLIPTokenizer
20
+
21
+ from diffusers import AutoencoderKL, UNet2DConditionModel
22
+ from diffusers.configuration_utils import FrozenDict
23
+ from diffusers.image_processor import VaeImageProcessor
24
+ from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin
25
+ from diffusers.models.attention import BasicTransformerBlock
26
+ from diffusers.models.attention_processor import LoRAAttnProcessor
27
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
28
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
29
+ from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers
30
+ from diffusers.utils import (
31
+ deprecate,
32
+ logging,
33
+ replace_example_docstring,
34
+ )
35
+ from diffusers.utils.torch_utils import randn_tensor
36
+
37
+
38
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
39
+
40
+ EXAMPLE_DOC_STRING = """
41
+ Examples:
42
+ ```py
43
+ >>> from diffusers import DiffusionPipeline
44
+ >>> import torch
45
+
46
+ >>> model_id = "dreamlike-art/dreamlike-photoreal-2.0"
47
+ >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric")
48
+ >>> pipe = pipe.to("cuda")
49
+ >>> prompt = "a giant standing in a fantasy landscape best quality"
50
+ >>> liked = [] # list of images for positive feedback
51
+ >>> disliked = [] # list of images for negative feedback
52
+ >>> image = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images[0]
53
+ ```
54
+ """
55
+
56
+
57
+ class FabricCrossAttnProcessor:
58
+ def __init__(self):
59
+ self.attention_probs = None
60
+
61
+ def __call__(
62
+ self,
63
+ attn,
64
+ hidden_states,
65
+ encoder_hidden_states=None,
66
+ attention_mask=None,
67
+ weights=None,
68
+ lora_scale=1.0,
69
+ ):
70
+ batch_size, sequence_length, _ = (
71
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
72
+ )
73
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
74
+
75
+ if isinstance(attn.processor, LoRAAttnProcessor):
76
+ query = attn.to_q(hidden_states) + lora_scale * attn.processor.to_q_lora(hidden_states)
77
+ else:
78
+ query = attn.to_q(hidden_states)
79
+
80
+ if encoder_hidden_states is None:
81
+ encoder_hidden_states = hidden_states
82
+ elif attn.norm_cross:
83
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
84
+
85
+ if isinstance(attn.processor, LoRAAttnProcessor):
86
+ key = attn.to_k(encoder_hidden_states) + lora_scale * attn.processor.to_k_lora(encoder_hidden_states)
87
+ value = attn.to_v(encoder_hidden_states) + lora_scale * attn.processor.to_v_lora(encoder_hidden_states)
88
+ else:
89
+ key = attn.to_k(encoder_hidden_states)
90
+ value = attn.to_v(encoder_hidden_states)
91
+
92
+ query = attn.head_to_batch_dim(query)
93
+ key = attn.head_to_batch_dim(key)
94
+ value = attn.head_to_batch_dim(value)
95
+
96
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
97
+
98
+ if weights is not None:
99
+ if weights.shape[0] != 1:
100
+ weights = weights.repeat_interleave(attn.heads, dim=0)
101
+ attention_probs = attention_probs * weights[:, None]
102
+ attention_probs = attention_probs / attention_probs.sum(dim=-1, keepdim=True)
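+ # Feedback tokens are up- or down-weighted above, and the attention distribution is then
+ # renormalized so that each query's attention weights still sum to 1.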
103
+
104
+ hidden_states = torch.bmm(attention_probs, value)
105
+ hidden_states = attn.batch_to_head_dim(hidden_states)
106
+
107
+ # linear proj
108
+ if isinstance(attn.processor, LoRAAttnProcessor):
109
+ hidden_states = attn.to_out[0](hidden_states) + lora_scale * attn.processor.to_out_lora(hidden_states)
110
+ else:
111
+ hidden_states = attn.to_out[0](hidden_states)
112
+ # dropout
113
+ hidden_states = attn.to_out[1](hidden_states)
114
+
115
+ return hidden_states
116
+
117
+
118
+ class FabricPipeline(DiffusionPipeline):
119
+ r"""
120
+ Pipeline for text-to-image generation using Stable Diffusion and conditioning the results using feedback images.
121
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
122
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
123
+
124
+ Args:
125
+ vae ([`AutoencoderKL`]):
126
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
127
+ text_encoder ([`~transformers.CLIPTextModel`]):
128
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
129
+ tokenizer ([`~transformers.CLIPTokenizer`]):
130
+ A `CLIPTokenizer` to tokenize text.
131
+ unet ([`UNet2DConditionModel`]):
132
+ A `UNet2DConditionModel` to denoise the encoded image latents.
133
+ scheduler ([`EulerAncestralDiscreteScheduler`]):
134
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
135
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
136
+ safety_checker ([`StableDiffusionSafetyChecker`]):
137
+ Classification module that estimates whether generated images could be considered offensive or harmful.
138
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
139
+ about a model's potential harms.
140
+ """
141
+
142
+ def __init__(
143
+ self,
144
+ vae: AutoencoderKL,
145
+ text_encoder: CLIPTextModel,
146
+ tokenizer: CLIPTokenizer,
147
+ unet: UNet2DConditionModel,
148
+ scheduler: KarrasDiffusionSchedulers,
149
+ requires_safety_checker: bool = True,
150
+ ):
151
+ super().__init__()
152
+
153
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
154
+ version.parse(unet.config._diffusers_version).base_version
155
+ ) < version.parse("0.9.0.dev0")
156
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
157
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
158
+ deprecation_message = (
159
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
160
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
161
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
162
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
163
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
164
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
165
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
166
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
167
+ " the `unet/config.json` file"
168
+ )
169
+
170
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
171
+ new_config = dict(unet.config)
172
+ new_config["sample_size"] = 64
173
+ unet._internal_dict = FrozenDict(new_config)
174
+
175
+ self.register_modules(
176
+ unet=unet,
177
+ vae=vae,
178
+ text_encoder=text_encoder,
179
+ tokenizer=tokenizer,
180
+ scheduler=scheduler,
181
+ )
182
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
183
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
184
+
185
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
186
+ def _encode_prompt(
187
+ self,
188
+ prompt,
189
+ device,
190
+ num_images_per_prompt,
191
+ do_classifier_free_guidance,
192
+ negative_prompt=None,
193
+ prompt_embeds: Optional[torch.FloatTensor] = None,
194
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
195
+ lora_scale: Optional[float] = None,
196
+ ):
197
+ r"""
198
+ Encodes the prompt into text encoder hidden states.
199
+
200
+ Args:
201
+ prompt (`str` or `List[str]`, *optional*):
202
+ prompt to be encoded
203
+ device: (`torch.device`):
204
+ torch device
205
+ num_images_per_prompt (`int`):
206
+ number of images that should be generated per prompt
207
+ do_classifier_free_guidance (`bool`):
208
+ whether to use classifier free guidance or not
209
+ negative_prompt (`str` or `List[str]`, *optional*):
210
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
211
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
212
+ less than `1`).
213
+ prompt_embeds (`torch.FloatTensor`, *optional*):
214
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
215
+ provided, text embeddings will be generated from `prompt` input argument.
216
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
217
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
218
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
219
+ argument.
220
+ lora_scale (`float`, *optional*):
221
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
222
+ """
223
+ # set lora scale so that monkey patched LoRA
224
+ # function of text encoder can correctly access it
225
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
226
+ self._lora_scale = lora_scale
227
+
228
+ if prompt is not None and isinstance(prompt, str):
229
+ batch_size = 1
230
+ elif prompt is not None and isinstance(prompt, list):
231
+ batch_size = len(prompt)
232
+ else:
233
+ batch_size = prompt_embeds.shape[0]
234
+
235
+ if prompt_embeds is None:
236
+ # textual inversion: process multi-vector tokens if necessary
237
+ if isinstance(self, TextualInversionLoaderMixin):
238
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
239
+
240
+ text_inputs = self.tokenizer(
241
+ prompt,
242
+ padding="max_length",
243
+ max_length=self.tokenizer.model_max_length,
244
+ truncation=True,
245
+ return_tensors="pt",
246
+ )
247
+ text_input_ids = text_inputs.input_ids
248
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
249
+
250
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
251
+ text_input_ids, untruncated_ids
252
+ ):
253
+ removed_text = self.tokenizer.batch_decode(
254
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
255
+ )
256
+ logger.warning(
257
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
258
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
259
+ )
260
+
261
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
262
+ attention_mask = text_inputs.attention_mask.to(device)
263
+ else:
264
+ attention_mask = None
265
+
266
+ prompt_embeds = self.text_encoder(
267
+ text_input_ids.to(device),
268
+ attention_mask=attention_mask,
269
+ )
270
+ prompt_embeds = prompt_embeds[0]
271
+
272
+ if self.text_encoder is not None:
273
+ prompt_embeds_dtype = self.text_encoder.dtype
274
+ elif self.unet is not None:
275
+ prompt_embeds_dtype = self.unet.dtype
276
+ else:
277
+ prompt_embeds_dtype = prompt_embeds.dtype
278
+
279
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
280
+
281
+ bs_embed, seq_len, _ = prompt_embeds.shape
282
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
283
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
284
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
285
+
286
+ # get unconditional embeddings for classifier free guidance
287
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
288
+ uncond_tokens: List[str]
289
+ if negative_prompt is None:
290
+ uncond_tokens = [""] * batch_size
291
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
292
+ raise TypeError(
293
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
294
+ f" {type(prompt)}."
295
+ )
296
+ elif isinstance(negative_prompt, str):
297
+ uncond_tokens = [negative_prompt]
298
+ elif batch_size != len(negative_prompt):
299
+ raise ValueError(
300
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
301
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
302
+ " the batch size of `prompt`."
303
+ )
304
+ else:
305
+ uncond_tokens = negative_prompt
306
+
307
+ # textual inversion: process multi-vector tokens if necessary
308
+ if isinstance(self, TextualInversionLoaderMixin):
309
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
310
+
311
+ max_length = prompt_embeds.shape[1]
312
+ uncond_input = self.tokenizer(
313
+ uncond_tokens,
314
+ padding="max_length",
315
+ max_length=max_length,
316
+ truncation=True,
317
+ return_tensors="pt",
318
+ )
319
+
320
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
321
+ attention_mask = uncond_input.attention_mask.to(device)
322
+ else:
323
+ attention_mask = None
324
+
325
+ negative_prompt_embeds = self.text_encoder(
326
+ uncond_input.input_ids.to(device),
327
+ attention_mask=attention_mask,
328
+ )
329
+ negative_prompt_embeds = negative_prompt_embeds[0]
330
+
331
+ if do_classifier_free_guidance:
332
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
333
+ seq_len = negative_prompt_embeds.shape[1]
334
+
335
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
336
+
337
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
338
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
339
+
340
+ # For classifier free guidance, we need to do two forward passes.
341
+ # Here we concatenate the unconditional and text embeddings into a single batch
342
+ # to avoid doing two forward passes
343
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
344
+
345
+ return prompt_embeds
346
+
347
+ def get_unet_hidden_states(self, z_all, t, prompt_embd):
348
+ cached_hidden_states = []
349
+ for module in self.unet.modules():
350
+ if isinstance(module, BasicTransformerBlock):
351
+
352
+ def new_forward(self, hidden_states, *args, **kwargs):
353
+ cached_hidden_states.append(hidden_states.clone().detach().cpu())
354
+ return self.old_forward(hidden_states, *args, **kwargs)
355
+
356
+ module.attn1.old_forward = module.attn1.forward
357
+ module.attn1.forward = new_forward.__get__(module.attn1)
358
+
359
+ # run forward pass to cache hidden states, output can be discarded
360
+ _ = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
361
+
362
+ # restore original forward pass
363
+ for module in self.unet.modules():
364
+ if isinstance(module, BasicTransformerBlock):
365
+ module.attn1.forward = module.attn1.old_forward
366
+ del module.attn1.old_forward
367
+
368
+ return cached_hidden_states
369
+
370
+ def unet_forward_with_cached_hidden_states(
371
+ self,
372
+ z_all,
373
+ t,
374
+ prompt_embd,
375
+ cached_pos_hiddens: Optional[List[torch.Tensor]] = None,
376
+ cached_neg_hiddens: Optional[List[torch.Tensor]] = None,
377
+ pos_weights=(0.8, 0.8),
378
+ neg_weights=(0.5, 0.5),
379
+ ):
380
+ if cached_pos_hiddens is None and cached_neg_hiddens is None:
381
+ return self.unet(z_all, t, encoder_hidden_states=prompt_embd)
382
+
383
+ local_pos_weights = torch.linspace(*pos_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
384
+ local_neg_weights = torch.linspace(*neg_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist()
385
+ for block, pos_weight, neg_weight in zip(
386
+ self.unet.down_blocks + [self.unet.mid_block] + self.unet.up_blocks,
387
+ local_pos_weights + [pos_weights[1]] + local_pos_weights[::-1],
388
+ local_neg_weights + [neg_weights[1]] + local_neg_weights[::-1],
389
+ ):
390
+ for module in block.modules():
391
+ if isinstance(module, BasicTransformerBlock):
392
+
393
+ def new_forward(
394
+ self,
395
+ hidden_states,
396
+ pos_weight=pos_weight,
397
+ neg_weight=neg_weight,
398
+ **kwargs,
399
+ ):
400
+ cond_hiddens, uncond_hiddens = hidden_states.chunk(2, dim=0)
401
+ batch_size, d_model = cond_hiddens.shape[:2]
402
+ device, dtype = hidden_states.device, hidden_states.dtype
403
+
404
+ weights = torch.ones(batch_size, d_model, device=device, dtype=dtype)
405
+ out_pos = self.old_forward(hidden_states)
406
+ out_neg = self.old_forward(hidden_states)
407
+
408
+ if cached_pos_hiddens is not None:
409
+ cached_pos_hs = cached_pos_hiddens.pop(0).to(hidden_states.device)
410
+ cond_pos_hs = torch.cat([cond_hiddens, cached_pos_hs], dim=1)
411
+ pos_weights = weights.clone().repeat(1, 1 + cached_pos_hs.shape[1] // d_model)
412
+ pos_weights[:, d_model:] = pos_weight
413
+ attn_with_weights = FabricCrossAttnProcessor()
414
+ out_pos = attn_with_weights(
415
+ self,
416
+ cond_hiddens,
417
+ encoder_hidden_states=cond_pos_hs,
418
+ weights=pos_weights,
419
+ )
420
+ else:
421
+ out_pos = self.old_forward(cond_hiddens)
422
+
423
+ if cached_neg_hiddens is not None:
424
+ cached_neg_hs = cached_neg_hiddens.pop(0).to(hidden_states.device)
425
+ uncond_neg_hs = torch.cat([uncond_hiddens, cached_neg_hs], dim=1)
426
+ neg_weights = weights.clone().repeat(1, 1 + cached_neg_hs.shape[1] // d_model)
427
+ neg_weights[:, d_model:] = neg_weight
428
+ attn_with_weights = FabricCrossAttnProcessor()
429
+ out_neg = attn_with_weights(
430
+ self,
431
+ uncond_hiddens,
432
+ encoder_hidden_states=uncond_neg_hs,
433
+ weights=neg_weights,
434
+ )
435
+ else:
436
+ out_neg = self.old_forward(uncond_hiddens)
437
+
438
+ out = torch.cat([out_pos, out_neg], dim=0)
439
+ return out
440
+
441
+ module.attn1.old_forward = module.attn1.forward
442
+ module.attn1.forward = new_forward.__get__(module.attn1)
443
+
444
+ out = self.unet(z_all, t, encoder_hidden_states=prompt_embd)
445
+
446
+ # restore original forward pass
447
+ for module in self.unet.modules():
448
+ if isinstance(module, BasicTransformerBlock):
449
+ module.attn1.forward = module.attn1.old_forward
450
+ del module.attn1.old_forward
451
+
452
+ return out
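+ # The wrapper above temporarily monkey-patches `attn1.forward` on every `BasicTransformerBlock`
+ # so that cached hidden states from the liked/disliked reference images are appended as extra
+ # keys/values in self-attention (weighted by `pos_weights`/`neg_weights`), and then restores
+ # the original forward methods.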
453
+
454
+ def preprocess_feedback_images(self, images, vae, dim, device, dtype, generator) -> torch.tensor:
455
+ images_t = [self.image_to_tensor(img, dim, dtype) for img in images]
456
+ images_t = torch.stack(images_t).to(device)
457
+ latents = vae.config.scaling_factor * vae.encode(images_t).latent_dist.sample(generator)
458
+
459
+ return torch.cat([latents], dim=0)
460
+
461
+ def check_inputs(
462
+ self,
463
+ prompt,
464
+ negative_prompt=None,
465
+ liked=None,
466
+ disliked=None,
467
+ height=None,
468
+ width=None,
469
+ ):
470
+ if prompt is None:
471
+ raise ValueError("Provide `prompt`; it cannot be left undefined.")
472
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
473
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
474
+
475
+ if negative_prompt is not None and (
476
+ not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list)
477
+ ):
478
+ raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
479
+
480
+ if liked is not None and not isinstance(liked, list):
481
+ raise ValueError(f"`liked` has to be of type `list` but is {type(liked)}")
482
+
483
+ if disliked is not None and not isinstance(disliked, list):
484
+ raise ValueError(f"`disliked` has to be of type `list` but is {type(disliked)}")
485
+
486
+ if height is not None and not isinstance(height, int):
487
+ raise ValueError(f"`height` has to be of type `int` but is {type(height)}")
488
+
489
+ if width is not None and not isinstance(width, int):
490
+ raise ValueError(f"`width` has to be of type `int` but is {type(width)}")
491
+
492
+ @torch.no_grad()
493
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
494
+ def __call__(
495
+ self,
496
+ prompt: Optional[Union[str, List[str]]] = "",
497
+ negative_prompt: Optional[Union[str, List[str]]] = "lowres, bad anatomy, bad hands, cropped, worst quality",
498
+ liked: Optional[Union[List[str], List[Image.Image]]] = [],
499
+ disliked: Optional[Union[List[str], List[Image.Image]]] = [],
500
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
501
+ height: int = 512,
502
+ width: int = 512,
503
+ return_dict: bool = True,
504
+ num_images: int = 4,
505
+ guidance_scale: float = 7.0,
506
+ num_inference_steps: int = 20,
507
+ output_type: Optional[str] = "pil",
508
+ feedback_start_ratio: float = 0.33,
509
+ feedback_end_ratio: float = 0.66,
510
+ min_weight: float = 0.05,
511
+ max_weight: float = 0.8,
512
+ neg_scale: float = 0.5,
513
+ pos_bottleneck_scale: float = 1.0,
514
+ neg_bottleneck_scale: float = 1.0,
515
+ latents: Optional[torch.FloatTensor] = None,
516
+ ):
517
+ r"""
518
+ The call function to the pipeline for generation. Generate a trajectory of images with binary feedback. The
519
+ feedback can be given as a list of liked and disliked images.
520
+
521
+ Args:
522
+ prompt (`str` or `List[str]`, *optional*):
523
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`
524
+ instead.
525
+ negative_prompt (`str` or `List[str]`, *optional*):
526
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
527
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
528
+ liked (`List[Image.Image]` or `List[str]`, *optional*):
529
+ Encourages images with liked features.
530
+ disliked (`List[Image.Image]` or `List[str]`, *optional*):
531
+ Discourages images with disliked features.
532
+ generator (`torch.Generator` or `List[torch.Generator]` or `int`, *optional*):
533
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) or an `int` to
534
+ make generation deterministic.
535
+ height (`int`, *optional*, defaults to 512):
536
+ Height of the generated image.
537
+ width (`int`, *optional*, defaults to 512):
538
+ Width of the generated image.
539
+ num_images (`int`, *optional*, defaults to 4):
540
+ The number of images to generate per prompt.
541
+ guidance_scale (`float`, *optional*, defaults to 7.0):
542
+ A higher guidance scale value encourages the model to generate images closely linked to the text
543
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
544
+ num_inference_steps (`int`, *optional*, defaults to 20):
545
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
546
+ expense of slower inference.
547
+ output_type (`str`, *optional*, defaults to `"pil"`):
548
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
549
+ return_dict (`bool`, *optional*, defaults to `True`):
550
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
551
+ plain tuple.
552
+ feedback_start_ratio (`float`, *optional*, defaults to `.33`):
553
+ Start point for providing feedback (between 0 and 1).
554
+ feedback_end_ratio (`float`, *optional*, defaults to `.66`):
555
+ End point for providing feedback (between 0 and 1).
556
+ min_weight (`float`, *optional*, defaults to `.05`):
557
+ Minimum weight for feedback.
558
+ max_weight (`float`, *optional*, defaults to `0.8`):
559
+ Maximum weight for feedback.
560
+ neg_scale (`float`, *optional*, defaults to `.5`):
561
+ Scale factor for negative feedback.
562
+
563
+ Examples:
564
+
565
+ Returns:
566
+ [`~pipelines.fabric.FabricPipelineOutput`] or `tuple`:
567
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
568
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
569
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
570
+ "not-safe-for-work" (nsfw) content.
571
+
572
+ """
573
+
574
+ self.check_inputs(prompt, negative_prompt, liked, disliked)
575
+
576
+ device = self._execution_device
577
+ dtype = self.unet.dtype
578
+
579
+ if isinstance(prompt, str) and prompt is not None:
580
+ batch_size = 1
581
+ elif isinstance(prompt, list) and prompt is not None:
582
+ batch_size = len(prompt)
583
+ else:
584
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
585
+
586
+ if isinstance(negative_prompt, list):
+ assert len(negative_prompt) == batch_size
592
+
593
+ shape = (
594
+ batch_size * num_images,
595
+ self.unet.config.in_channels,
596
+ height // self.vae_scale_factor,
597
+ width // self.vae_scale_factor,
598
+ )
599
+ latent_noise = randn_tensor(
600
+ shape,
601
+ device=device,
602
+ dtype=dtype,
603
+ generator=generator,
604
+ )
605
+
606
+ positive_latents = (
607
+ self.preprocess_feedback_images(liked, self.vae, (height, width), device, dtype, generator)
608
+ if liked and len(liked) > 0
609
+ else torch.tensor(
610
+ [],
611
+ device=device,
612
+ dtype=dtype,
613
+ )
614
+ )
615
+ negative_latents = (
616
+ self.preprocess_feedback_images(disliked, self.vae, (height, width), device, dtype, generator)
617
+ if disliked and len(disliked) > 0
618
+ else torch.tensor(
619
+ [],
620
+ device=device,
621
+ dtype=dtype,
622
+ )
623
+ )
624
+
625
+ do_classifier_free_guidance = guidance_scale > 0.1
626
+
627
+ (prompt_neg_embs, prompt_pos_embs) = self._encode_prompt(
628
+ prompt,
629
+ device,
630
+ num_images,
631
+ do_classifier_free_guidance,
632
+ negative_prompt,
633
+ ).split([num_images * batch_size, num_images * batch_size])
634
+
635
+ batched_prompt_embd = torch.cat([prompt_pos_embs, prompt_neg_embs], dim=0)
636
+
637
+ null_tokens = self.tokenizer(
638
+ [""],
639
+ return_tensors="pt",
640
+ max_length=self.tokenizer.model_max_length,
641
+ padding="max_length",
642
+ truncation=True,
643
+ )
644
+
645
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
646
+ attention_mask = null_tokens.attention_mask.to(device)
647
+ else:
648
+ attention_mask = None
649
+
650
+ null_prompt_emb = self.text_encoder(
651
+ input_ids=null_tokens.input_ids.to(device),
652
+ attention_mask=attention_mask,
653
+ ).last_hidden_state
654
+
655
+ null_prompt_emb = null_prompt_emb.to(device=device, dtype=dtype)
656
+
657
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
658
+ timesteps = self.scheduler.timesteps
659
+ latent_noise = latent_noise * self.scheduler.init_noise_sigma
660
+
661
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
662
+
663
+ ref_start_idx = round(len(timesteps) * feedback_start_ratio)
664
+ ref_end_idx = round(len(timesteps) * feedback_end_ratio)
665
+
666
+ with self.progress_bar(total=num_inference_steps) as pbar:
667
+ for i, t in enumerate(timesteps):
668
+ sigma = self.scheduler.sigma_t[t] if hasattr(self.scheduler, "sigma_t") else 0
669
+ if hasattr(self.scheduler, "sigmas"):
670
+ sigma = self.scheduler.sigmas[i]
671
+
672
+ alpha_hat = 1 / (sigma**2 + 1)
673
+
674
+ z_single = self.scheduler.scale_model_input(latent_noise, t)
675
+ z_all = torch.cat([z_single] * 2, dim=0)
676
+ z_ref = torch.cat([positive_latents, negative_latents], dim=0)
677
+
678
+ if i >= ref_start_idx and i <= ref_end_idx:
679
+ weight_factor = max_weight
680
+ else:
681
+ weight_factor = min_weight
682
+
683
+ pos_ws = (weight_factor, weight_factor * pos_bottleneck_scale)
684
+ neg_ws = (weight_factor * neg_scale, weight_factor * neg_scale * neg_bottleneck_scale)
685
+
686
+ if z_ref.size(0) > 0 and weight_factor > 0:
687
+ noise = torch.randn_like(z_ref)
688
+ if isinstance(self.scheduler, EulerAncestralDiscreteScheduler):
689
+ z_ref_noised = (alpha_hat**0.5 * z_ref + (1 - alpha_hat) ** 0.5 * noise).type(dtype)
690
+ else:
691
+ z_ref_noised = self.scheduler.add_noise(z_ref, noise, t)
692
+
693
+ ref_prompt_embd = torch.cat(
694
+ [null_prompt_emb] * (len(positive_latents) + len(negative_latents)), dim=0
695
+ )
696
+ cached_hidden_states = self.get_unet_hidden_states(z_ref_noised, t, ref_prompt_embd)
697
+
698
+ n_pos, n_neg = positive_latents.shape[0], negative_latents.shape[0]
699
+ cached_pos_hs, cached_neg_hs = [], []
700
+ for hs in cached_hidden_states:
701
+ cached_pos, cached_neg = hs.split([n_pos, n_neg], dim=0)
702
+ cached_pos = cached_pos.view(1, -1, *cached_pos.shape[2:]).expand(num_images, -1, -1)
703
+ cached_neg = cached_neg.view(1, -1, *cached_neg.shape[2:]).expand(num_images, -1, -1)
704
+ cached_pos_hs.append(cached_pos)
705
+ cached_neg_hs.append(cached_neg)
706
+
707
+ if n_pos == 0:
708
+ cached_pos_hs = None
709
+ if n_neg == 0:
710
+ cached_neg_hs = None
711
+ else:
712
+ cached_pos_hs, cached_neg_hs = None, None
713
+ unet_out = self.unet_forward_with_cached_hidden_states(
714
+ z_all,
715
+ t,
716
+ prompt_embd=batched_prompt_embd,
717
+ cached_pos_hiddens=cached_pos_hs,
718
+ cached_neg_hiddens=cached_neg_hs,
719
+ pos_weights=pos_ws,
720
+ neg_weights=neg_ws,
721
+ )[0]
722
+
723
+ noise_cond, noise_uncond = unet_out.chunk(2)
724
+ guidance = noise_cond - noise_uncond
725
+ noise_pred = noise_uncond + guidance_scale * guidance
726
+ latent_noise = self.scheduler.step(noise_pred, t, latent_noise)[0]
727
+
728
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
729
+ pbar.update()
730
+
731
+ y = self.vae.decode(latent_noise / self.vae.config.scaling_factor, return_dict=False)[0]
732
+ imgs = self.image_processor.postprocess(
733
+ y,
734
+ output_type=output_type,
735
+ )
736
+
737
+ if not return_dict:
738
+ return imgs
739
+
740
+ return StableDiffusionPipelineOutput(imgs, False)
741
+
742
+ def image_to_tensor(self, image: Union[str, Image.Image], dim: tuple, dtype):
743
+ """
744
+ Convert a PIL feedback image (or an image path) to a torch tensor for further processing.
745
+ """
746
+ if isinstance(image, str):
747
+ image = Image.open(image)
748
+ if not image.mode == "RGB":
749
+ image = image.convert("RGB")
750
+ image = self.image_processor.preprocess(image, height=dim[0], width=dim[1])[0]
751
+ return image.type(dtype)
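+
+ # A hedged sketch of an iterative feedback loop built on `FabricPipeline.__call__`; the prompt
+ # and the choice of which samples to like or dislike are assumptions for illustration only:
+ #
+ #   liked, disliked = [], []
+ #   for _ in range(3):
+ #       images = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images
+ #       liked.append(images[0])      # keep the preferred sample as positive feedback
+ #       disliked.append(images[-1])  # mark the least preferred sample as negative feedback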
v0.26.3/pipeline_null_text_inversion.py ADDED
@@ -0,0 +1,260 @@
1
+ import inspect
2
+ import os
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as nnf
7
+ from PIL import Image
8
+ from torch.optim.adam import Adam
9
+ from tqdm import tqdm
10
+
11
+ from diffusers import StableDiffusionPipeline
12
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
13
+
14
+
15
+ def retrieve_timesteps(
16
+ scheduler,
17
+ num_inference_steps=None,
18
+ device=None,
19
+ timesteps=None,
20
+ **kwargs,
21
+ ):
22
+ """
23
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
24
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
25
+ Args:
26
+ scheduler (`SchedulerMixin`):
27
+ The scheduler to get timesteps from.
28
+ num_inference_steps (`int`):
29
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
30
+ `timesteps` must be `None`.
31
+ device (`str` or `torch.device`, *optional*):
32
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
33
+ timesteps (`List[int]`, *optional*):
34
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
35
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
36
+ must be `None`.
37
+
38
+ Returns:
39
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
40
+ second element is the number of inference steps.
41
+ """
42
+ if timesteps is not None:
43
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
44
+ if not accepts_timesteps:
45
+ raise ValueError(
46
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
47
+ f" timestep schedules. Please check whether you are using the correct scheduler."
48
+ )
49
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
50
+ timesteps = scheduler.timesteps
51
+ num_inference_steps = len(timesteps)
52
+ else:
53
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
54
+ timesteps = scheduler.timesteps
55
+ return timesteps, num_inference_steps
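+ # A hedged usage sketch; the scheduler instance, step count and custom schedule are assumptions:
+ #   timesteps, num_inference_steps = retrieve_timesteps(pipe.scheduler, num_inference_steps=50, device="cuda")
+ #   # or, only if the scheduler's `set_timesteps` accepts custom `timesteps`:
+ #   timesteps, num_inference_steps = retrieve_timesteps(pipe.scheduler, timesteps=[999, 749, 499, 249], device="cuda")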
56
+
57
+
58
+ class NullTextPipeline(StableDiffusionPipeline):
59
+ def get_noise_pred(self, latents, t, context):
60
+ latents_input = torch.cat([latents] * 2)
61
+ guidance_scale = 7.5
62
+ noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"]
63
+ noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
64
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
65
+ latents = self.prev_step(noise_pred, t, latents)
66
+ return latents
67
+
68
+ def get_noise_pred_single(self, latents, t, context):
69
+ noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"]
70
+ return noise_pred
71
+
72
+ @torch.no_grad()
73
+ def image2latent(self, image_path):
74
+ image = Image.open(image_path).convert("RGB")
75
+ image = np.array(image)
76
+ image = torch.from_numpy(image).float() / 127.5 - 1
77
+ image = image.permute(2, 0, 1).unsqueeze(0).to(self.device)
78
+ latents = self.vae.encode(image)["latent_dist"].mean
79
+ latents = latents * 0.18215
80
+ return latents
81
+
82
+ @torch.no_grad()
83
+ def latent2image(self, latents):
84
+ latents = 1 / 0.18215 * latents.detach()
85
+ image = self.vae.decode(latents)["sample"].detach()
86
+ image = self.image_processor.postprocess(image, output_type="pil")[0]
87
+ return image
88
+
89
+ def prev_step(self, model_output, timestep, sample):
90
+ prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
91
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
92
+ alpha_prod_t_prev = (
93
+ self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
94
+ )
95
+ beta_prod_t = 1 - alpha_prod_t
96
+ pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
97
+ pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
98
+ prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction
99
+ return prev_sample
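+ # `prev_step` applies the deterministic DDIM update used during reconstruction:
+ #   x0_pred = (x_t - sqrt(1 - alpha_t) * eps) / sqrt(alpha_t)
+ #   x_{t-1}  = sqrt(alpha_{t-1}) * x0_pred + sqrt(1 - alpha_{t-1}) * eps
+ # `next_step` below runs the same rule in the forward direction for DDIM inversion.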
100
+
101
+ def next_step(self, model_output, timestep, sample):
102
+ timestep, next_timestep = (
103
+ min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999),
104
+ timestep,
105
+ )
106
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
107
+ alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep]
108
+ beta_prod_t = 1 - alpha_prod_t
109
+ next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
110
+ next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
111
+ next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction
112
+ return next_sample
113
+
114
+ def null_optimization(self, latents, context, num_inner_steps, epsilon):
115
+ uncond_embeddings, cond_embeddings = context.chunk(2)
116
+ uncond_embeddings_list = []
117
+ latent_cur = latents[-1]
118
+ bar = tqdm(total=num_inner_steps * self.num_inference_steps)
119
+ for i in range(self.num_inference_steps):
120
+ uncond_embeddings = uncond_embeddings.clone().detach()
121
+ uncond_embeddings.requires_grad = True
122
+ optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0))
123
+ latent_prev = latents[len(latents) - i - 2]
124
+ t = self.scheduler.timesteps[i]
125
+ with torch.no_grad():
126
+ noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings)
127
+ for j in range(num_inner_steps):
128
+ noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings)
129
+ noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond)
130
+ latents_prev_rec = self.prev_step(noise_pred, t, latent_cur)
131
+ loss = nnf.mse_loss(latents_prev_rec, latent_prev)
132
+ optimizer.zero_grad()
133
+ loss.backward()
134
+ optimizer.step()
135
+ loss_item = loss.item()
136
+ bar.update()
137
+ if loss_item < epsilon + i * 2e-5:
138
+ break
139
+ for j in range(j + 1, num_inner_steps):
140
+ bar.update()
141
+ uncond_embeddings_list.append(uncond_embeddings[:1].detach())
142
+ with torch.no_grad():
143
+ context = torch.cat([uncond_embeddings, cond_embeddings])
144
+ latent_cur = self.get_noise_pred(latent_cur, t, context)
145
+ bar.close()
146
+ return uncond_embeddings_list
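+ # For every timestep, the unconditional ("null") embedding is optimized with Adam so that the
+ # classifier-free-guided DDIM step reproduces the inverted latent trajectory; the inner loop
+ # stops early once the MSE falls below `epsilon + i * 2e-5`, and one embedding is stored per step.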
147
+
148
+ @torch.no_grad()
149
+ def ddim_inversion_loop(self, latent, context):
150
+ self.scheduler.set_timesteps(self.num_inference_steps)
151
+ _, cond_embeddings = context.chunk(2)
152
+ all_latent = [latent]
153
+ latent = latent.clone().detach()
154
+ with torch.no_grad():
155
+ for i in range(0, self.num_inference_steps):
156
+ t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1]
157
+ noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"]
158
+ latent = self.next_step(noise_pred, t, latent)
159
+ all_latent.append(latent)
160
+ return all_latent
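+ # DDIM inversion walks the timesteps in reverse order, repeatedly applying `next_step` with the
+ # conditional prediction, and returns the full latent trajectory, from the clean image latent to
+ # its most heavily noised counterpart, which `invert` then feeds to the null-text optimization.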
161
+
162
+ def get_context(self, prompt):
163
+ uncond_input = self.tokenizer(
164
+ [""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
165
+ )
166
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
167
+ text_input = self.tokenizer(
168
+ [prompt],
169
+ padding="max_length",
170
+ max_length=self.tokenizer.model_max_length,
171
+ truncation=True,
172
+ return_tensors="pt",
173
+ )
174
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
175
+ context = torch.cat([uncond_embeddings, text_embeddings])
176
+ return context
177
+
178
+ def invert(
179
+ self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50
180
+ ):
181
+ self.num_inference_steps = num_inference_steps
182
+ context = self.get_context(prompt)
183
+ latent = self.image2latent(image_path)
184
+ ddim_latents = self.ddim_inversion_loop(latent, context)
185
+ if os.path.exists(image_path + ".pt"):
186
+ uncond_embeddings = torch.load(image_path + ".pt")
187
+ else:
188
+ uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon)
189
+ uncond_embeddings = torch.stack(uncond_embeddings, 0)
190
+ torch.save(uncond_embeddings, image_path + ".pt")
191
+ return ddim_latents[-1], uncond_embeddings
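+ # A minimal, hedged usage sketch; the model id, scheduler choice and file path are assumptions:
+ #   pipe = NullTextPipeline.from_pretrained(
+ #       "runwayml/stable-diffusion-v1-5",
+ #       scheduler=DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler"),
+ #   ).to("cuda")
+ #   inverted_latent, uncond_embeddings = pipe.invert("input_image.png", "a photo of a cat", num_inference_steps=50)
+ #   image = pipe("a photo of a cat", uncond_embeddings, inverted_latent, num_inference_steps=50).images[0]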
192
+
193
+ @torch.no_grad()
194
+ def __call__(
195
+ self,
196
+ prompt,
197
+ uncond_embeddings,
198
+ inverted_latent,
199
+ num_inference_steps: int = 50,
200
+ timesteps=None,
201
+ guidance_scale=7.5,
202
+ negative_prompt=None,
203
+ num_images_per_prompt=1,
204
+ generator=None,
205
+ latents=None,
206
+ prompt_embeds=None,
207
+ negative_prompt_embeds=None,
208
+ output_type="pil",
209
+ ):
210
+ self._guidance_scale = guidance_scale
211
+ # 0. Default height and width to unet
212
+ height = self.unet.config.sample_size * self.vae_scale_factor
213
+ width = self.unet.config.sample_size * self.vae_scale_factor
214
+ # to deal with lora scaling and other possible forward hook
215
+ callback_steps = None
216
+ # 1. Check inputs. Raise error if not correct
217
+ self.check_inputs(
218
+ prompt,
219
+ height,
220
+ width,
221
+ callback_steps,
222
+ negative_prompt,
223
+ prompt_embeds,
224
+ negative_prompt_embeds,
225
+ )
226
+ # 2. Define call parameter
227
+ device = self._execution_device
228
+ # 3. Encode input prompt
229
+ prompt_embeds, _ = self.encode_prompt(
230
+ prompt,
231
+ device,
232
+ num_images_per_prompt,
233
+ self.do_classifier_free_guidance,
234
+ negative_prompt,
235
+ prompt_embeds=prompt_embeds,
236
+ negative_prompt_embeds=negative_prompt_embeds,
237
+ )
238
+ # 4. Prepare timesteps
239
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
240
+ latents = inverted_latent
241
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
242
+ for i, t in enumerate(timesteps):
243
+ noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"]
244
+ noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"]
245
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
246
+ # compute the previous noisy sample x_t -> x_t-1
247
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
248
+ progress_bar.update()
249
+ if not output_type == "latent":
250
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
251
+ 0
252
+ ]
253
+ else:
254
+ image = latents
255
+ image = self.image_processor.postprocess(
256
+ image, output_type=output_type, do_denormalize=[True] * image.shape[0]
257
+ )
258
+ # Offload all models
259
+ self.maybe_free_model_hooks()
260
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
v0.26.3/pipeline_prompt2prompt.py ADDED
@@ -0,0 +1,861 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from __future__ import annotations
16
+
17
+ import abc
18
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+ import torch.nn.functional as F
23
+
24
+ from diffusers.models.attention import Attention
25
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipeline, StableDiffusionPipelineOutput
26
+
27
+
28
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
29
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
30
+ """
31
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
32
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
33
+ """
34
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
35
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
36
+ # rescale the results from guidance (fixes overexposure)
37
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
38
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
39
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
40
+ return noise_cfg
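+ # In the notation of the referenced paper, the function above computes
+ #   noise_cfg <- phi * noise_cfg * std(noise_pred_text) / std(noise_cfg) + (1 - phi) * noise_cfg
+ # with phi = `guidance_rescale`, pulling the guided noise back toward the per-sample standard
+ # deviation of the text-conditioned prediction to reduce overexposure.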
41
+
42
+
43
+ class Prompt2PromptPipeline(StableDiffusionPipeline):
44
+ r"""
45
+ Prompt-to-Prompt pipeline for text-to-image generation using Stable Diffusion. This model inherits from
+ [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for
+ all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
49
+ vae ([`AutoencoderKL`]):
50
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
51
+ text_encoder ([`CLIPTextModel`]):
52
+ Frozen text-encoder. Stable Diffusion uses the text portion of
53
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
54
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
55
+ tokenizer (`CLIPTokenizer`):
56
+ Tokenizer of class
57
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
58
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler
59
+ ([`SchedulerMixin`]):
60
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
61
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
62
+ safety_checker ([`StableDiffusionSafetyChecker`]):
63
+ Classification module that estimates whether generated images could be considered offensive or harmful.
64
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
65
+ feature_extractor ([`CLIPFeatureExtractor`]):
66
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
67
+ """
68
+
69
+ _optional_components = ["safety_checker", "feature_extractor"]
70
+
71
+ @torch.no_grad()
72
+ def __call__(
73
+ self,
74
+ prompt: Union[str, List[str]],
75
+ height: Optional[int] = None,
76
+ width: Optional[int] = None,
77
+ num_inference_steps: int = 50,
78
+ guidance_scale: float = 7.5,
79
+ negative_prompt: Optional[Union[str, List[str]]] = None,
80
+ num_images_per_prompt: Optional[int] = 1,
81
+ eta: float = 0.0,
82
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
83
+ latents: Optional[torch.FloatTensor] = None,
84
+ prompt_embeds: Optional[torch.FloatTensor] = None,
85
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
86
+ output_type: Optional[str] = "pil",
87
+ return_dict: bool = True,
88
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
89
+ callback_steps: Optional[int] = 1,
90
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
91
+ guidance_rescale: float = 0.0,
92
+ ):
93
+ r"""
94
+ Function invoked when calling the pipeline for generation.
95
+
96
+ Args:
97
+ prompt (`str` or `List[str]`):
98
+ The prompt or prompts to guide the image generation.
99
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
100
+ The height in pixels of the generated image.
101
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
102
+ The width in pixels of the generated image.
103
+ num_inference_steps (`int`, *optional*, defaults to 50):
104
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
105
+ expense of slower inference.
106
+ guidance_scale (`float`, *optional*, defaults to 7.5):
107
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
108
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
109
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
110
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
111
+ usually at the expense of lower image quality.
112
+ negative_prompt (`str` or `List[str]`, *optional*):
113
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
114
+ if `guidance_scale` is less than `1`).
115
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
116
+ The number of images to generate per prompt.
117
+ eta (`float`, *optional*, defaults to 0.0):
118
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
119
+ [`schedulers.DDIMScheduler`], will be ignored for others.
120
+ generator (`torch.Generator`, *optional*):
121
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
122
+ to make generation deterministic.
123
+ latents (`torch.FloatTensor`, *optional*):
124
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
125
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
126
+ tensor will be generated by sampling using the supplied random `generator`.
127
+ output_type (`str`, *optional*, defaults to `"pil"`):
128
+ The output format of the generated image. Choose between
129
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
130
+ return_dict (`bool`, *optional*, defaults to `True`):
131
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
132
+ plain tuple.
133
+ callback (`Callable`, *optional*):
134
+ A function that will be called every `callback_steps` steps during inference. The function will be
135
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
136
+ callback_steps (`int`, *optional*, defaults to 1):
137
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
138
+ called at every step.
139
+ cross_attention_kwargs (`dict`, *optional*):
140
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
141
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
142
+
143
+ The keyword arguments to configure the edit are:
144
+ - edit_type (`str`): The edit type to apply. Can be one of `replace`, `refine`, `reweight`.
145
+ - n_cross_replace (`int` or `float`): Number or fraction of the diffusion steps during which cross attention is replaced (defaults to 0.4).
146
+ - n_self_replace (`int` or `float`): Number or fraction of the diffusion steps during which self attention is replaced (defaults to 0.4).
147
+ - local_blend_words (`List[str]`, *optional*, defaults to `None`): Determines which area should be
148
+ changed. If None, then the whole image can be changed.
149
+ - equalizer_words (`List[str]`, *optional*, defaults to `None`): Required for edit type `reweight`.
150
+ Determines which words should be enhanced.
151
+ - equalizer_strengths (`List[float]`, *optional*, defaults to `None`): Required for edit type `reweight`.
152
+ Determines how much the words in `equalizer_words` should be enhanced.
153
+
154
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
155
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
156
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
157
+ using zero terminal SNR.
158
+
159
+ Returns:
160
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
161
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
162
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
163
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
164
+ (nsfw) content, according to the `safety_checker`.
165
+ """
166
+
167
+ self.controller = create_controller(
168
+ prompt, cross_attention_kwargs, num_inference_steps, tokenizer=self.tokenizer, device=self.device
169
+ )
170
+ self.register_attention_control(self.controller) # add attention controller
171
+
172
+ # 0. Default height and width to unet
173
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
174
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
175
+
176
+ # 1. Check inputs. Raise error if not correct
177
+ self.check_inputs(prompt, height, width, callback_steps)
178
+
179
+ # 2. Define call parameters
180
+ if prompt is not None and isinstance(prompt, str):
181
+ batch_size = 1
182
+ elif prompt is not None and isinstance(prompt, list):
183
+ batch_size = len(prompt)
184
+ else:
185
+ batch_size = prompt_embeds.shape[0]
186
+
187
+ device = self._execution_device
188
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
189
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
190
+ # corresponds to doing no classifier free guidance.
191
+ do_classifier_free_guidance = guidance_scale > 1.0
192
+
193
+ # 3. Encode input prompt
194
+ text_encoder_lora_scale = (
195
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
196
+ )
197
+ prompt_embeds = self._encode_prompt(
198
+ prompt,
199
+ device,
200
+ num_images_per_prompt,
201
+ do_classifier_free_guidance,
202
+ negative_prompt,
203
+ prompt_embeds=prompt_embeds,
204
+ negative_prompt_embeds=negative_prompt_embeds,
205
+ lora_scale=text_encoder_lora_scale,
206
+ )
207
+
208
+ # 4. Prepare timesteps
209
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
210
+ timesteps = self.scheduler.timesteps
211
+
212
+ # 5. Prepare latent variables
213
+ num_channels_latents = self.unet.config.in_channels
214
+ latents = self.prepare_latents(
215
+ batch_size * num_images_per_prompt,
216
+ num_channels_latents,
217
+ height,
218
+ width,
219
+ prompt_embeds.dtype,
220
+ device,
221
+ generator,
222
+ latents,
223
+ )
224
+
225
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
226
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
227
+
228
+ # 7. Denoising loop
229
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
230
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
231
+ for i, t in enumerate(timesteps):
232
+ # expand the latents if we are doing classifier free guidance
233
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
234
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
235
+
236
+ # predict the noise residual
237
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
238
+
239
+ # perform guidance
240
+ if do_classifier_free_guidance:
241
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
242
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
243
+
244
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
245
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
246
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
247
+
248
+ # compute the previous noisy sample x_t -> x_t-1
249
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
250
+
251
+ # step callback
252
+ latents = self.controller.step_callback(latents)
253
+
254
+ # call the callback, if provided
255
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
256
+ progress_bar.update()
257
+ if callback is not None and i % callback_steps == 0:
258
+ step_idx = i // getattr(self.scheduler, "order", 1)
259
+ callback(step_idx, t, latents)
260
+
261
+ # 8. Post-processing
262
+ if not output_type == "latent":
263
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
264
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
265
+ else:
266
+ image = latents
267
+ has_nsfw_concept = None
268
+
269
+ # 9. Run safety checker
270
+ if has_nsfw_concept is None:
271
+ do_denormalize = [True] * image.shape[0]
272
+ else:
273
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
274
+
275
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
276
+
277
+ # Offload last model to CPU
278
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
279
+ self.final_offload_hook.offload()
280
+
281
+ if not return_dict:
282
+ return (image, has_nsfw_concept)
283
+
284
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
285
+
286
+ def register_attention_control(self, controller):
287
+ attn_procs = {}
288
+ cross_att_count = 0
289
+ for name in self.unet.attn_processors.keys():
290
+ None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
291
+ if name.startswith("mid_block"):
292
+ self.unet.config.block_out_channels[-1]
293
+ place_in_unet = "mid"
294
+ elif name.startswith("up_blocks"):
295
+ block_id = int(name[len("up_blocks.")])
296
+ list(reversed(self.unet.config.block_out_channels))[block_id]
297
+ place_in_unet = "up"
298
+ elif name.startswith("down_blocks"):
299
+ block_id = int(name[len("down_blocks.")])
300
+ self.unet.config.block_out_channels[block_id]
301
+ place_in_unet = "down"
302
+ else:
303
+ continue
304
+ cross_att_count += 1
305
+ attn_procs[name] = P2PCrossAttnProcessor(controller=controller, place_in_unet=place_in_unet)
306
+
307
+ self.unet.set_attn_processor(attn_procs)
308
+ controller.num_att_layers = cross_att_count
309
+
310
+
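The `cross_attention_kwargs` edit keys documented in the `__call__` docstring above are consumed by `create_controller` further down in this file. A minimal, hedged usage sketch follows; the checkpoint name and the `custom_pipeline` identifier are illustrative assumptions, not part of this diff.

```python
# Illustrative only: assumes this file is loadable as the "pipeline_prompt2prompt"
# community pipeline and that the listed Stable Diffusion checkpoint is available.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="pipeline_prompt2prompt",
    torch_dtype=torch.float16,
).to("cuda")

# Two prompts with the same word count: the first is the source, the second the edit.
prompts = ["a photo of a cat riding a bicycle", "a photo of a dog riding a bicycle"]
output = pipe(
    prompt=prompts,
    num_inference_steps=50,
    cross_attention_kwargs={
        "edit_type": "replace",   # one of: replace, refine, reweight
        "n_cross_replace": 0.4,   # fraction of steps with replaced cross-attention
        "n_self_replace": 0.4,    # fraction of steps with replaced self-attention
        # "local_blend_words": ["cat", "dog"],  # optionally restrict the edited region
    },
)
output.images[0].save("source.png")
output.images[1].save("edited.png")
```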
311
+ class P2PCrossAttnProcessor:
312
+ def __init__(self, controller, place_in_unet):
313
+ super().__init__()
314
+ self.controller = controller
315
+ self.place_in_unet = place_in_unet
316
+
317
+ def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None):
318
+ batch_size, sequence_length, _ = hidden_states.shape
319
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
320
+
321
+ query = attn.to_q(hidden_states)
322
+
323
+ is_cross = encoder_hidden_states is not None
324
+ encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
325
+ key = attn.to_k(encoder_hidden_states)
326
+ value = attn.to_v(encoder_hidden_states)
327
+
328
+ query = attn.head_to_batch_dim(query)
329
+ key = attn.head_to_batch_dim(key)
330
+ value = attn.head_to_batch_dim(value)
331
+
332
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
333
+
334
+ # one line change
335
+ self.controller(attention_probs, is_cross, self.place_in_unet)
336
+
337
+ hidden_states = torch.bmm(attention_probs, value)
338
+ hidden_states = attn.batch_to_head_dim(hidden_states)
339
+
340
+ # linear proj
341
+ hidden_states = attn.to_out[0](hidden_states)
342
+ # dropout
343
+ hidden_states = attn.to_out[1](hidden_states)
344
+
345
+ return hidden_states
346
+
347
+
348
+ def create_controller(
349
+ prompts: List[str], cross_attention_kwargs: Dict, num_inference_steps: int, tokenizer, device
350
+ ) -> AttentionControl:
351
+ edit_type = cross_attention_kwargs.get("edit_type", None)
352
+ local_blend_words = cross_attention_kwargs.get("local_blend_words", None)
353
+ equalizer_words = cross_attention_kwargs.get("equalizer_words", None)
354
+ equalizer_strengths = cross_attention_kwargs.get("equalizer_strengths", None)
355
+ n_cross_replace = cross_attention_kwargs.get("n_cross_replace", 0.4)
356
+ n_self_replace = cross_attention_kwargs.get("n_self_replace", 0.4)
357
+
358
+ # only replace
359
+ if edit_type == "replace" and local_blend_words is None:
360
+ return AttentionReplace(
361
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, tokenizer=tokenizer, device=device
362
+ )
363
+
364
+ # replace + localblend
365
+ if edit_type == "replace" and local_blend_words is not None:
366
+ lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
367
+ return AttentionReplace(
368
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, lb, tokenizer=tokenizer, device=device
369
+ )
370
+
371
+ # only refine
372
+ if edit_type == "refine" and local_blend_words is None:
373
+ return AttentionRefine(
374
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, tokenizer=tokenizer, device=device
375
+ )
376
+
377
+ # refine + localblend
378
+ if edit_type == "refine" and local_blend_words is not None:
379
+ lb = LocalBlend(prompts, local_blend_words, tokenizer=tokenizer, device=device)
380
+ return AttentionRefine(
381
+ prompts, num_inference_steps, n_cross_replace, n_self_replace, lb, tokenizer=tokenizer, device=device
382
+ )
383
+
384
+ # reweight
385
+ if edit_type == "reweight":
386
+ assert (
387
+ equalizer_words is not None and equalizer_strengths is not None
388
+ ), "To use reweight edit, please specify equalizer_words and equalizer_strengths."
389
+ assert len(equalizer_words) == len(
390
+ equalizer_strengths
391
+ ), "equalizer_words and equalizer_strengths must be of same length."
392
+ equalizer = get_equalizer(prompts[1], equalizer_words, equalizer_strengths, tokenizer=tokenizer)
393
+ return AttentionReweight(
394
+ prompts,
395
+ num_inference_steps,
396
+ n_cross_replace,
397
+ n_self_replace,
398
+ tokenizer=tokenizer,
399
+ device=device,
400
+ equalizer=equalizer,
401
+ )
402
+
403
+ raise ValueError(f"Edit type {edit_type} not recognized. Use one of: replace, refine, reweight.")
404
+
405
+
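`create_controller` above dispatches purely on the keys of `cross_attention_kwargs`. The three accepted configurations, sketched as plain dictionaries; the prompt-dependent values (blended and reweighted words, step fractions) are hypothetical examples.

```python
# "replace": word-for-word swap between two equal-length prompts
replace_kwargs = {"edit_type": "replace", "n_cross_replace": 0.4, "n_self_replace": 0.4}

# "refine": the second prompt adds or changes words; token alignment is computed automatically
refine_kwargs = {
    "edit_type": "refine",
    "n_cross_replace": 0.9,
    "n_self_replace": 0.2,
    "local_blend_words": ["crown"],  # optional: blend only where "crown" attends
}

# "reweight": amplify or suppress selected words of the second prompt
reweight_kwargs = {
    "edit_type": "reweight",
    "n_cross_replace": 0.8,
    "n_self_replace": 0.4,
    "equalizer_words": ["smiling"],
    "equalizer_strengths": [5.0],
}
```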
406
+ class AttentionControl(abc.ABC):
407
+ def step_callback(self, x_t):
408
+ return x_t
409
+
410
+ def between_steps(self):
411
+ return
412
+
413
+ @property
414
+ def num_uncond_att_layers(self):
415
+ return 0
416
+
417
+ @abc.abstractmethod
418
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
419
+ raise NotImplementedError
420
+
421
+ def __call__(self, attn, is_cross: bool, place_in_unet: str):
422
+ if self.cur_att_layer >= self.num_uncond_att_layers:
423
+ h = attn.shape[0]
424
+ attn[h // 2 :] = self.forward(attn[h // 2 :], is_cross, place_in_unet)
425
+ self.cur_att_layer += 1
426
+ if self.cur_att_layer == self.num_att_layers + self.num_uncond_att_layers:
427
+ self.cur_att_layer = 0
428
+ self.cur_step += 1
429
+ self.between_steps()
430
+ return attn
431
+
432
+ def reset(self):
433
+ self.cur_step = 0
434
+ self.cur_att_layer = 0
435
+
436
+ def __init__(self):
437
+ self.cur_step = 0
438
+ self.num_att_layers = -1
439
+ self.cur_att_layer = 0
440
+
441
+
442
+ class EmptyControl(AttentionControl):
443
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
444
+ return attn
445
+
446
+
447
+ class AttentionStore(AttentionControl):
448
+ @staticmethod
449
+ def get_empty_store():
450
+ return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []}
451
+
452
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
453
+ key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
454
+ if attn.shape[1] <= 32**2: # avoid memory overhead
455
+ self.step_store[key].append(attn)
456
+ return attn
457
+
458
+ def between_steps(self):
459
+ if len(self.attention_store) == 0:
460
+ self.attention_store = self.step_store
461
+ else:
462
+ for key in self.attention_store:
463
+ for i in range(len(self.attention_store[key])):
464
+ self.attention_store[key][i] += self.step_store[key][i]
465
+ self.step_store = self.get_empty_store()
466
+
467
+ def get_average_attention(self):
468
+ average_attention = {
469
+ key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store
470
+ }
471
+ return average_attention
472
+
473
+ def reset(self):
474
+ super(AttentionStore, self).reset()
475
+ self.step_store = self.get_empty_store()
476
+ self.attention_store = {}
477
+
478
+ def __init__(self):
479
+ super(AttentionStore, self).__init__()
480
+ self.step_store = self.get_empty_store()
481
+ self.attention_store = {}
482
+
483
+
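`AttentionStore` only ever receives the second (conditional) half of the classifier-free-guidance batch, because `AttentionControl.__call__` slices `attn[h // 2:]` before forwarding. A tiny hedged sketch with a fake attention map; the shapes are illustrative.

```python
import torch

store = AttentionStore()
store.num_att_layers = 1                     # pretend a single attention layer is hooked

# (batch*heads, query tokens, text tokens); the first half is the unconditional CFG pass
fake_attn = torch.rand(8, 256, 77)
store(fake_attn, is_cross=True, place_in_unet="up")   # one layer -> one "step" completes

# Only the conditional half was stored and averaged over the completed steps.
print(store.get_average_attention()["up_cross"][0].shape)   # torch.Size([4, 256, 77])
```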
484
+ class LocalBlend:
485
+ def __call__(self, x_t, attention_store):
486
+ k = 1
487
+ maps = attention_store["down_cross"][2:4] + attention_store["up_cross"][:3]
488
+ maps = [item.reshape(self.alpha_layers.shape[0], -1, 1, 16, 16, self.max_num_words) for item in maps]
489
+ maps = torch.cat(maps, dim=1)
490
+ maps = (maps * self.alpha_layers).sum(-1).mean(1)
491
+ mask = F.max_pool2d(maps, (k * 2 + 1, k * 2 + 1), (1, 1), padding=(k, k))
492
+ mask = F.interpolate(mask, size=(x_t.shape[2:]))
493
+ mask = mask / mask.max(2, keepdims=True)[0].max(3, keepdims=True)[0]
494
+ mask = mask.gt(self.threshold)
495
+ mask = (mask[:1] + mask[1:]).float()
496
+ x_t = x_t[:1] + mask * (x_t - x_t[:1])
497
+ return x_t
498
+
499
+ def __init__(
500
+ self, prompts: List[str], words: List[List[str]], tokenizer, device, threshold=0.3, max_num_words=77
501
+ ):
502
+ self.max_num_words = max_num_words
503
+
504
+ alpha_layers = torch.zeros(len(prompts), 1, 1, 1, 1, self.max_num_words)
505
+ for i, (prompt, words_) in enumerate(zip(prompts, words)):
506
+ if isinstance(words_, str):
507
+ words_ = [words_]
508
+ for word in words_:
509
+ ind = get_word_inds(prompt, word, tokenizer)
510
+ alpha_layers[i, :, :, :, :, ind] = 1
511
+ self.alpha_layers = alpha_layers.to(device)
512
+ self.threshold = threshold
513
+
514
+
515
+ class AttentionControlEdit(AttentionStore, abc.ABC):
516
+ def step_callback(self, x_t):
517
+ if self.local_blend is not None:
518
+ x_t = self.local_blend(x_t, self.attention_store)
519
+ return x_t
520
+
521
+ def replace_self_attention(self, attn_base, att_replace):
522
+ if att_replace.shape[2] <= 16**2:
523
+ return attn_base.unsqueeze(0).expand(att_replace.shape[0], *attn_base.shape)
524
+ else:
525
+ return att_replace
526
+
527
+ @abc.abstractmethod
528
+ def replace_cross_attention(self, attn_base, att_replace):
529
+ raise NotImplementedError
530
+
531
+ def forward(self, attn, is_cross: bool, place_in_unet: str):
532
+ super(AttentionControlEdit, self).forward(attn, is_cross, place_in_unet)
533
+ # FIXME: does not replace correctly yet
534
+ if is_cross or (self.num_self_replace[0] <= self.cur_step < self.num_self_replace[1]):
535
+ h = attn.shape[0] // (self.batch_size)
536
+ attn = attn.reshape(self.batch_size, h, *attn.shape[1:])
537
+ attn_base, attn_repalce = attn[0], attn[1:]
538
+ if is_cross:
539
+ alpha_words = self.cross_replace_alpha[self.cur_step]
540
+ attn_repalce_new = (
541
+ self.replace_cross_attention(attn_base, attn_repalce) * alpha_words
542
+ + (1 - alpha_words) * attn_repalce
543
+ )
544
+ attn[1:] = attn_repalce_new
545
+ else:
546
+ attn[1:] = self.replace_self_attention(attn_base, attn_repalce)
547
+ attn = attn.reshape(self.batch_size * h, *attn.shape[2:])
548
+ return attn
549
+
550
+ def __init__(
551
+ self,
552
+ prompts,
553
+ num_steps: int,
554
+ cross_replace_steps: Union[float, Tuple[float, float], Dict[str, Tuple[float, float]]],
555
+ self_replace_steps: Union[float, Tuple[float, float]],
556
+ local_blend: Optional[LocalBlend],
557
+ tokenizer,
558
+ device,
559
+ ):
560
+ super(AttentionControlEdit, self).__init__()
561
+ # add tokenizer and device here
562
+
563
+ self.tokenizer = tokenizer
564
+ self.device = device
565
+
566
+ self.batch_size = len(prompts)
567
+ self.cross_replace_alpha = get_time_words_attention_alpha(
568
+ prompts, num_steps, cross_replace_steps, self.tokenizer
569
+ ).to(self.device)
570
+ if isinstance(self_replace_steps, float):
571
+ self_replace_steps = 0, self_replace_steps
572
+ self.num_self_replace = int(num_steps * self_replace_steps[0]), int(num_steps * self_replace_steps[1])
573
+ self.local_blend = local_blend  # defined outside the class and passed in
574
+
575
+
576
+ class AttentionReplace(AttentionControlEdit):
577
+ def replace_cross_attention(self, attn_base, att_replace):
578
+ return torch.einsum("hpw,bwn->bhpn", attn_base, self.mapper)
579
+
580
+ def __init__(
581
+ self,
582
+ prompts,
583
+ num_steps: int,
584
+ cross_replace_steps: float,
585
+ self_replace_steps: float,
586
+ local_blend: Optional[LocalBlend] = None,
587
+ tokenizer=None,
588
+ device=None,
589
+ ):
590
+ super(AttentionReplace, self).__init__(
591
+ prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device
592
+ )
593
+ self.mapper = get_replacement_mapper(prompts, self.tokenizer).to(self.device)
594
+
595
+
596
+ class AttentionRefine(AttentionControlEdit):
597
+ def replace_cross_attention(self, attn_base, att_replace):
598
+ attn_base_replace = attn_base[:, :, self.mapper].permute(2, 0, 1, 3)
599
+ attn_replace = attn_base_replace * self.alphas + att_replace * (1 - self.alphas)
600
+ return attn_replace
601
+
602
+ def __init__(
603
+ self,
604
+ prompts,
605
+ num_steps: int,
606
+ cross_replace_steps: float,
607
+ self_replace_steps: float,
608
+ local_blend: Optional[LocalBlend] = None,
609
+ tokenizer=None,
610
+ device=None,
611
+ ):
612
+ super(AttentionRefine, self).__init__(
613
+ prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device
614
+ )
615
+ self.mapper, alphas = get_refinement_mapper(prompts, self.tokenizer)
616
+ self.mapper, alphas = self.mapper.to(self.device), alphas.to(self.device)
617
+ self.alphas = alphas.reshape(alphas.shape[0], 1, 1, alphas.shape[1])
618
+
619
+
620
+ class AttentionReweight(AttentionControlEdit):
621
+ def replace_cross_attention(self, attn_base, att_replace):
622
+ if self.prev_controller is not None:
623
+ attn_base = self.prev_controller.replace_cross_attention(attn_base, att_replace)
624
+ attn_replace = attn_base[None, :, :, :] * self.equalizer[:, None, None, :]
625
+ return attn_replace
626
+
627
+ def __init__(
628
+ self,
629
+ prompts,
630
+ num_steps: int,
631
+ cross_replace_steps: float,
632
+ self_replace_steps: float,
633
+ equalizer,
634
+ local_blend: Optional[LocalBlend] = None,
635
+ controller: Optional[AttentionControlEdit] = None,
636
+ tokenizer=None,
637
+ device=None,
638
+ ):
639
+ super(AttentionReweight, self).__init__(
640
+ prompts, num_steps, cross_replace_steps, self_replace_steps, local_blend, tokenizer, device
641
+ )
642
+ self.equalizer = equalizer.to(self.device)
643
+ self.prev_controller = controller
644
+
645
+
646
+ ### util functions for all Edits
647
+ def update_alpha_time_word(
648
+ alpha, bounds: Union[float, Tuple[float, float]], prompt_ind: int, word_inds: Optional[torch.Tensor] = None
649
+ ):
650
+ if isinstance(bounds, float):
651
+ bounds = 0, bounds
652
+ start, end = int(bounds[0] * alpha.shape[0]), int(bounds[1] * alpha.shape[0])
653
+ if word_inds is None:
654
+ word_inds = torch.arange(alpha.shape[2])
655
+ alpha[:start, prompt_ind, word_inds] = 0
656
+ alpha[start:end, prompt_ind, word_inds] = 1
657
+ alpha[end:, prompt_ind, word_inds] = 0
658
+ return alpha
659
+
660
+
661
+ def get_time_words_attention_alpha(
662
+ prompts, num_steps, cross_replace_steps: Union[float, Dict[str, Tuple[float, float]]], tokenizer, max_num_words=77
663
+ ):
664
+ if not isinstance(cross_replace_steps, dict):
665
+ cross_replace_steps = {"default_": cross_replace_steps}
666
+ if "default_" not in cross_replace_steps:
667
+ cross_replace_steps["default_"] = (0.0, 1.0)
668
+ alpha_time_words = torch.zeros(num_steps + 1, len(prompts) - 1, max_num_words)
669
+ for i in range(len(prompts) - 1):
670
+ alpha_time_words = update_alpha_time_word(alpha_time_words, cross_replace_steps["default_"], i)
671
+ for key, item in cross_replace_steps.items():
672
+ if key != "default_":
673
+ inds = [get_word_inds(prompts[i], key, tokenizer) for i in range(1, len(prompts))]
674
+ for i, ind in enumerate(inds):
675
+ if len(ind) > 0:
676
+ alpha_time_words = update_alpha_time_word(alpha_time_words, item, i, ind)
677
+ alpha_time_words = alpha_time_words.reshape(num_steps + 1, len(prompts) - 1, 1, 1, max_num_words)
678
+ return alpha_time_words
679
+
680
+
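A small worked example of the schedule built by `update_alpha_time_word`: with 50 steps and the default bound of 0.4, a word's alpha is 1 for the first 20 of the 51 step slots and 0 afterwards.

```python
import torch

num_steps = 50
alpha = torch.zeros(num_steps + 1, 1, 77)        # (steps + 1, len(prompts) - 1, max_num_words)
alpha = update_alpha_time_word(alpha, 0.4, prompt_ind=0)

# bounds (0, 0.4) over 51 slots -> start = 0, end = int(0.4 * 51) = 20
print(alpha[:, 0].sum().item())                           # 20 * 77 = 1540.0
print(alpha[19, 0, 0].item(), alpha[20, 0, 0].item())     # 1.0 0.0
```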
681
+ ### util functions for LocalBlend and ReplacementEdit
682
+ def get_word_inds(text: str, word_place: Union[int, str], tokenizer):
683
+ split_text = text.split(" ")
684
+ if isinstance(word_place, str):
685
+ word_place = [i for i, word in enumerate(split_text) if word_place == word]
686
+ elif isinstance(word_place, int):
687
+ word_place = [word_place]
688
+ out = []
689
+ if len(word_place) > 0:
690
+ words_encode = [tokenizer.decode([item]).strip("#") for item in tokenizer.encode(text)][1:-1]
691
+ cur_len, ptr = 0, 0
692
+
693
+ for i in range(len(words_encode)):
694
+ cur_len += len(words_encode[i])
695
+ if ptr in word_place:
696
+ out.append(i + 1)
697
+ if cur_len >= len(split_text[ptr]):
698
+ ptr += 1
699
+ cur_len = 0
700
+ return np.array(out)
701
+
702
+
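A hedged sanity check for `get_word_inds`; it assumes the standard CLIP tokenizer checkpoint used by Stable Diffusion 1.x can be downloaded.

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")

# "cat" is the fifth word; token positions are shifted by one for the start-of-text token.
print(get_word_inds("a photo of a cat", "cat", tokenizer))   # expected: [5]
```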
703
+ ### util functions for ReplacementEdit
704
+ def get_replacement_mapper_(x: str, y: str, tokenizer, max_len=77):
705
+ words_x = x.split(" ")
706
+ words_y = y.split(" ")
707
+ if len(words_x) != len(words_y):
708
+ raise ValueError(
709
+ f"attention replacement edit can only be applied on prompts with the same length"
710
+ f" but prompt A has {len(words_x)} words and prompt B has {len(words_y)} words."
711
+ )
712
+ inds_replace = [i for i in range(len(words_y)) if words_y[i] != words_x[i]]
713
+ inds_source = [get_word_inds(x, i, tokenizer) for i in inds_replace]
714
+ inds_target = [get_word_inds(y, i, tokenizer) for i in inds_replace]
715
+ mapper = np.zeros((max_len, max_len))
716
+ i = j = 0
717
+ cur_inds = 0
718
+ while i < max_len and j < max_len:
719
+ if cur_inds < len(inds_source) and inds_source[cur_inds][0] == i:
720
+ inds_source_, inds_target_ = inds_source[cur_inds], inds_target[cur_inds]
721
+ if len(inds_source_) == len(inds_target_):
722
+ mapper[inds_source_, inds_target_] = 1
723
+ else:
724
+ ratio = 1 / len(inds_target_)
725
+ for i_t in inds_target_:
726
+ mapper[inds_source_, i_t] = ratio
727
+ cur_inds += 1
728
+ i += len(inds_source_)
729
+ j += len(inds_target_)
730
+ elif cur_inds < len(inds_source):
731
+ mapper[i, j] = 1
732
+ i += 1
733
+ j += 1
734
+ else:
735
+ mapper[j, j] = 1
736
+ i += 1
737
+ j += 1
738
+
739
+ return torch.from_numpy(mapper).float()
740
+
741
+
742
+ def get_replacement_mapper(prompts, tokenizer, max_len=77):
743
+ x_seq = prompts[0]
744
+ mappers = []
745
+ for i in range(1, len(prompts)):
746
+ mapper = get_replacement_mapper_(x_seq, prompts[i], tokenizer, max_len)
747
+ mappers.append(mapper)
748
+ return torch.stack(mappers)
749
+
750
+
751
+ ### util functions for ReweightEdit
752
+ def get_equalizer(
753
+ text: str, word_select: Union[int, Tuple[int, ...]], values: Union[List[float], Tuple[float, ...]], tokenizer
754
+ ):
755
+ if isinstance(word_select, (int, str)):
756
+ word_select = (word_select,)
757
+ equalizer = torch.ones(len(values), 77)
758
+ values = torch.tensor(values, dtype=torch.float32)
759
+ for word in word_select:
760
+ inds = get_word_inds(text, word, tokenizer)
761
+ equalizer[:, inds] = values
762
+ return equalizer
763
+
764
+
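`get_equalizer` returns a `(len(values), 77)` tensor of per-token scale factors that `AttentionReweight` multiplies into the cross-attention columns. A short hedged sketch, reusing the tokenizer assumption from the `get_word_inds` example above.

```python
# Boost "smiling" five-fold in the edited prompt; every other token column stays at 1.0.
equalizer = get_equalizer("a photo of a smiling cat", ("smiling",), (5.0,), tokenizer)
print(equalizer.shape)                                        # torch.Size([1, 77])

inds = get_word_inds("a photo of a smiling cat", "smiling", tokenizer)
print(equalizer[0, inds])                                     # all selected positions hold 5.0
```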
765
+ ### util functions for RefinementEdit
766
+ class ScoreParams:
767
+ def __init__(self, gap, match, mismatch):
768
+ self.gap = gap
769
+ self.match = match
770
+ self.mismatch = mismatch
771
+
772
+ def mis_match_char(self, x, y):
773
+ if x != y:
774
+ return self.mismatch
775
+ else:
776
+ return self.match
777
+
778
+
779
+ def get_matrix(size_x, size_y, gap):
780
+ matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
781
+ matrix[0, 1:] = (np.arange(size_y) + 1) * gap
782
+ matrix[1:, 0] = (np.arange(size_x) + 1) * gap
783
+ return matrix
784
+
785
+
786
+ def get_traceback_matrix(size_x, size_y):
787
+ matrix = np.zeros((size_x + 1, size_y + 1), dtype=np.int32)
788
+ matrix[0, 1:] = 1
789
+ matrix[1:, 0] = 2
790
+ matrix[0, 0] = 4
791
+ return matrix
792
+
793
+
794
+ def global_align(x, y, score):
795
+ matrix = get_matrix(len(x), len(y), score.gap)
796
+ trace_back = get_traceback_matrix(len(x), len(y))
797
+ for i in range(1, len(x) + 1):
798
+ for j in range(1, len(y) + 1):
799
+ left = matrix[i, j - 1] + score.gap
800
+ up = matrix[i - 1, j] + score.gap
801
+ diag = matrix[i - 1, j - 1] + score.mis_match_char(x[i - 1], y[j - 1])
802
+ matrix[i, j] = max(left, up, diag)
803
+ if matrix[i, j] == left:
804
+ trace_back[i, j] = 1
805
+ elif matrix[i, j] == up:
806
+ trace_back[i, j] = 2
807
+ else:
808
+ trace_back[i, j] = 3
809
+ return matrix, trace_back
810
+
811
+
812
+ def get_aligned_sequences(x, y, trace_back):
813
+ x_seq = []
814
+ y_seq = []
815
+ i = len(x)
816
+ j = len(y)
817
+ mapper_y_to_x = []
818
+ while i > 0 or j > 0:
819
+ if trace_back[i, j] == 3:
820
+ x_seq.append(x[i - 1])
821
+ y_seq.append(y[j - 1])
822
+ i = i - 1
823
+ j = j - 1
824
+ mapper_y_to_x.append((j, i))
825
+ elif trace_back[i][j] == 1:
826
+ x_seq.append("-")
827
+ y_seq.append(y[j - 1])
828
+ j = j - 1
829
+ mapper_y_to_x.append((j, -1))
830
+ elif trace_back[i][j] == 2:
831
+ x_seq.append(x[i - 1])
832
+ y_seq.append("-")
833
+ i = i - 1
834
+ elif trace_back[i][j] == 4:
835
+ break
836
+ mapper_y_to_x.reverse()
837
+ return x_seq, y_seq, torch.tensor(mapper_y_to_x, dtype=torch.int64)
838
+
839
+
840
+ def get_mapper(x: str, y: str, tokenizer, max_len=77):
841
+ x_seq = tokenizer.encode(x)
842
+ y_seq = tokenizer.encode(y)
843
+ score = ScoreParams(0, 1, -1)
844
+ matrix, trace_back = global_align(x_seq, y_seq, score)
845
+ mapper_base = get_aligned_sequences(x_seq, y_seq, trace_back)[-1]
846
+ alphas = torch.ones(max_len)
847
+ alphas[: mapper_base.shape[0]] = mapper_base[:, 1].ne(-1).float()
848
+ mapper = torch.zeros(max_len, dtype=torch.int64)
849
+ mapper[: mapper_base.shape[0]] = mapper_base[:, 1]
850
+ mapper[mapper_base.shape[0] :] = len(y_seq) + torch.arange(max_len - len(y_seq))
851
+ return mapper, alphas
852
+
853
+
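The refinement path aligns the two token sequences with a simple Needleman–Wunsch style dynamic program (`global_align` plus `get_aligned_sequences`). A tiny worked example on integer stand-ins for token ids:

```python
x = [1, 2, 3]        # source "tokens"
y = [1, 3]           # edited "tokens" (token 2 was dropped)

score = ScoreParams(gap=0, match=1, mismatch=-1)
matrix, trace_back = global_align(x, y, score)
x_seq, y_seq, mapper_y_to_x = get_aligned_sequences(x, y, trace_back)

print(x_seq[::-1], y_seq[::-1])   # [1, 2, 3] [1, '-', 3]  (sequences are built back-to-front)
print(mapper_y_to_x)              # tensor([[0, 0], [1, 2]]): y[0] -> x[0], y[1] -> x[2]
```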
854
+ def get_refinement_mapper(prompts, tokenizer, max_len=77):
855
+ x_seq = prompts[0]
856
+ mappers, alphas = [], []
857
+ for i in range(1, len(prompts)):
858
+ mapper, alpha = get_mapper(x_seq, prompts[i], tokenizer, max_len)
859
+ mappers.append(mapper)
860
+ alphas.append(alpha)
861
+ return torch.stack(mappers), torch.stack(alphas)
v0.26.3/pipeline_sdxl_style_aligned.py ADDED
The diff for this file is too large to render. See raw diff
 
v0.26.3/pipeline_stable_diffusion_upscale_ldm3d.py ADDED
@@ -0,0 +1,772 @@
1
+ # Copyright 2023 The Intel Labs Team Authors and the HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import PIL
20
+ import torch
21
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
22
+
23
+ from diffusers import DiffusionPipeline
24
+ from diffusers.image_processor import PipelineDepthInput, PipelineImageInput, VaeImageProcessorLDM3D
25
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
27
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
28
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
29
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d import LDM3DPipelineOutput
30
+ from diffusers.schedulers import DDPMScheduler, KarrasDiffusionSchedulers
31
+ from diffusers.utils import (
32
+ USE_PEFT_BACKEND,
33
+ deprecate,
34
+ logging,
35
+ scale_lora_layers,
36
+ unscale_lora_layers,
37
+ )
38
+ from diffusers.utils.torch_utils import randn_tensor
39
+
40
+
41
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
42
+
43
+ EXAMPLE_DOC_STRING = """
44
+ Examples:
45
+ ```python
46
+ >>> from diffusers import StableDiffusionUpscaleLDM3DPipeline
47
+ >>> from PIL import Image
48
+ >>> from io import BytesIO
49
+ >>> import requests
50
+
51
+ >>> pipe = StableDiffusionUpscaleLDM3DPipeline.from_pretrained("Intel/ldm3d-sr")
52
+ >>> pipe = pipe.to("cuda")
53
+ >>> rgb_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_rgb.jpg"
54
+ >>> depth_path = "https://huggingface.co/Intel/ldm3d-sr/resolve/main/lemons_ldm3d_depth.png"
55
+ >>> low_res_rgb = Image.open(BytesIO(requests.get(rgb_path).content)).convert("RGB")
56
+ >>> low_res_depth = Image.open(BytesIO(requests.get(depth_path).content)).convert("L")
57
+ >>> output = pipe(
58
+ ... prompt="high quality high resolution uhd 4k image",
59
+ ... rgb=low_res_rgb,
60
+ ... depth=low_res_depth,
61
+ ... num_inference_steps=50,
62
+ ... target_res=[1024, 1024],
63
+ ... )
64
+ >>> rgb_image, depth_image = output.rgb, output.depth
65
+ >>> rgb_image[0].save("hr_ldm3d_rgb.jpg")
66
+ >>> depth_image[0].save("hr_ldm3d_depth.png")
67
+ ```
68
+ """
69
+
70
+
71
+ class StableDiffusionUpscaleLDM3DPipeline(
72
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
73
+ ):
74
+ r"""
75
+ Pipeline for text-guided RGB and depth (RGB-D) image upscaling using LDM3D.
76
+
77
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
78
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
79
+
80
+ The pipeline also inherits the following loading methods:
81
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
82
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
83
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
84
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
85
+
86
+ Args:
87
+ vae ([`AutoencoderKL`]):
88
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
89
+ text_encoder ([`~transformers.CLIPTextModel`]):
90
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
91
+ tokenizer ([`~transformers.CLIPTokenizer`]):
92
+ A `CLIPTokenizer` to tokenize text.
93
+ unet ([`UNet2DConditionModel`]):
94
+ A `UNet2DConditionModel` to denoise the encoded image latents.
95
+ low_res_scheduler ([`SchedulerMixin`]):
96
+ A scheduler used to add initial noise to the low resolution conditioning image. It must be an instance of
97
+ [`DDPMScheduler`].
98
+ scheduler ([`SchedulerMixin`]):
99
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
100
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
101
+ safety_checker ([`StableDiffusionSafetyChecker`]):
102
+ Classification module that estimates whether generated images could be considered offensive or harmful.
103
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
104
+ about a model's potential harms.
105
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
106
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
107
+ """
108
+
109
+ _optional_components = ["safety_checker", "feature_extractor"]
110
+
111
+ def __init__(
112
+ self,
113
+ vae: AutoencoderKL,
114
+ text_encoder: CLIPTextModel,
115
+ tokenizer: CLIPTokenizer,
116
+ unet: UNet2DConditionModel,
117
+ low_res_scheduler: DDPMScheduler,
118
+ scheduler: KarrasDiffusionSchedulers,
119
+ safety_checker: StableDiffusionSafetyChecker,
120
+ feature_extractor: CLIPImageProcessor,
121
+ requires_safety_checker: bool = True,
122
+ watermarker: Optional[Any] = None,
123
+ max_noise_level: int = 350,
124
+ ):
125
+ super().__init__()
126
+
127
+ if safety_checker is None and requires_safety_checker:
128
+ logger.warning(
129
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
130
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
131
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
132
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
133
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
134
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
135
+ )
136
+
137
+ if safety_checker is not None and feature_extractor is None:
138
+ raise ValueError(
139
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
140
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
141
+ )
142
+
143
+ self.register_modules(
144
+ vae=vae,
145
+ text_encoder=text_encoder,
146
+ tokenizer=tokenizer,
147
+ unet=unet,
148
+ low_res_scheduler=low_res_scheduler,
149
+ scheduler=scheduler,
150
+ safety_checker=safety_checker,
151
+ watermarker=watermarker,
152
+ feature_extractor=feature_extractor,
153
+ )
154
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
155
+ self.image_processor = VaeImageProcessorLDM3D(vae_scale_factor=self.vae_scale_factor, resample="bilinear")
156
+ # self.register_to_config(requires_safety_checker=requires_safety_checker)
157
+ self.register_to_config(max_noise_level=max_noise_level)
158
+
159
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline._encode_prompt
160
+ def _encode_prompt(
161
+ self,
162
+ prompt,
163
+ device,
164
+ num_images_per_prompt,
165
+ do_classifier_free_guidance,
166
+ negative_prompt=None,
167
+ prompt_embeds: Optional[torch.FloatTensor] = None,
168
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
169
+ lora_scale: Optional[float] = None,
170
+ **kwargs,
171
+ ):
172
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
173
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
174
+
175
+ prompt_embeds_tuple = self.encode_prompt(
176
+ prompt=prompt,
177
+ device=device,
178
+ num_images_per_prompt=num_images_per_prompt,
179
+ do_classifier_free_guidance=do_classifier_free_guidance,
180
+ negative_prompt=negative_prompt,
181
+ prompt_embeds=prompt_embeds,
182
+ negative_prompt_embeds=negative_prompt_embeds,
183
+ lora_scale=lora_scale,
184
+ **kwargs,
185
+ )
186
+
187
+ # concatenate for backwards comp
188
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
189
+
190
+ return prompt_embeds
191
+
192
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_ldm3d.StableDiffusionLDM3DPipeline.encode_prompt
193
+ def encode_prompt(
194
+ self,
195
+ prompt,
196
+ device,
197
+ num_images_per_prompt,
198
+ do_classifier_free_guidance,
199
+ negative_prompt=None,
200
+ prompt_embeds: Optional[torch.FloatTensor] = None,
201
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
202
+ lora_scale: Optional[float] = None,
203
+ clip_skip: Optional[int] = None,
204
+ ):
205
+ r"""
206
+ Encodes the prompt into text encoder hidden states.
207
+
208
+ Args:
209
+ prompt (`str` or `List[str]`, *optional*):
210
+ prompt to be encoded
211
+ device: (`torch.device`):
212
+ torch device
213
+ num_images_per_prompt (`int`):
214
+ number of images that should be generated per prompt
215
+ do_classifier_free_guidance (`bool`):
216
+ whether to use classifier free guidance or not
217
+ negative_prompt (`str` or `List[str]`, *optional*):
218
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
219
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
220
+ less than `1`).
221
+ prompt_embeds (`torch.FloatTensor`, *optional*):
222
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
223
+ provided, text embeddings will be generated from `prompt` input argument.
224
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
225
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
226
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
227
+ argument.
228
+ lora_scale (`float`, *optional*):
229
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
230
+ clip_skip (`int`, *optional*):
231
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
232
+ the output of the pre-final layer will be used for computing the prompt embeddings.
233
+ """
234
+ # set lora scale so that monkey patched LoRA
235
+ # function of text encoder can correctly access it
236
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
237
+ self._lora_scale = lora_scale
238
+
239
+ # dynamically adjust the LoRA scale
240
+ if not USE_PEFT_BACKEND:
241
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
242
+ else:
243
+ scale_lora_layers(self.text_encoder, lora_scale)
244
+
245
+ if prompt is not None and isinstance(prompt, str):
246
+ batch_size = 1
247
+ elif prompt is not None and isinstance(prompt, list):
248
+ batch_size = len(prompt)
249
+ else:
250
+ batch_size = prompt_embeds.shape[0]
251
+
252
+ if prompt_embeds is None:
253
+ # textual inversion: process multi-vector tokens if necessary
254
+ if isinstance(self, TextualInversionLoaderMixin):
255
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
256
+
257
+ text_inputs = self.tokenizer(
258
+ prompt,
259
+ padding="max_length",
260
+ max_length=self.tokenizer.model_max_length,
261
+ truncation=True,
262
+ return_tensors="pt",
263
+ )
264
+ text_input_ids = text_inputs.input_ids
265
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
266
+
267
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
268
+ text_input_ids, untruncated_ids
269
+ ):
270
+ removed_text = self.tokenizer.batch_decode(
271
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
272
+ )
273
+ logger.warning(
274
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
275
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
276
+ )
277
+
278
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
279
+ attention_mask = text_inputs.attention_mask.to(device)
280
+ else:
281
+ attention_mask = None
282
+
283
+ if clip_skip is None:
284
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
285
+ prompt_embeds = prompt_embeds[0]
286
+ else:
287
+ prompt_embeds = self.text_encoder(
288
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
289
+ )
290
+ # Access the `hidden_states` first, that contains a tuple of
291
+ # all the hidden states from the encoder layers. Then index into
292
+ # the tuple to access the hidden states from the desired layer.
293
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
294
+ # We also need to apply the final LayerNorm here to not mess with the
295
+ # representations. The `last_hidden_states` that we typically use for
296
+ # obtaining the final prompt representations passes through the LayerNorm
297
+ # layer.
298
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
299
+
300
+ if self.text_encoder is not None:
301
+ prompt_embeds_dtype = self.text_encoder.dtype
302
+ elif self.unet is not None:
303
+ prompt_embeds_dtype = self.unet.dtype
304
+ else:
305
+ prompt_embeds_dtype = prompt_embeds.dtype
306
+
307
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
308
+
309
+ bs_embed, seq_len, _ = prompt_embeds.shape
310
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
311
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
312
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
313
+
314
+ # get unconditional embeddings for classifier free guidance
315
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
316
+ uncond_tokens: List[str]
317
+ if negative_prompt is None:
318
+ uncond_tokens = [""] * batch_size
319
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
320
+ raise TypeError(
321
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
322
+ f" {type(prompt)}."
323
+ )
324
+ elif isinstance(negative_prompt, str):
325
+ uncond_tokens = [negative_prompt]
326
+ elif batch_size != len(negative_prompt):
327
+ raise ValueError(
328
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
329
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
330
+ " the batch size of `prompt`."
331
+ )
332
+ else:
333
+ uncond_tokens = negative_prompt
334
+
335
+ # textual inversion: process multi-vector tokens if necessary
336
+ if isinstance(self, TextualInversionLoaderMixin):
337
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
338
+
339
+ max_length = prompt_embeds.shape[1]
340
+ uncond_input = self.tokenizer(
341
+ uncond_tokens,
342
+ padding="max_length",
343
+ max_length=max_length,
344
+ truncation=True,
345
+ return_tensors="pt",
346
+ )
347
+
348
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
349
+ attention_mask = uncond_input.attention_mask.to(device)
350
+ else:
351
+ attention_mask = None
352
+
353
+ negative_prompt_embeds = self.text_encoder(
354
+ uncond_input.input_ids.to(device),
355
+ attention_mask=attention_mask,
356
+ )
357
+ negative_prompt_embeds = negative_prompt_embeds[0]
358
+
359
+ if do_classifier_free_guidance:
360
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
361
+ seq_len = negative_prompt_embeds.shape[1]
362
+
363
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
364
+
365
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
366
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
367
+
368
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
369
+ # Retrieve the original scale by scaling back the LoRA layers
370
+ unscale_lora_layers(self.text_encoder, lora_scale)
371
+
372
+ return prompt_embeds, negative_prompt_embeds
373
+
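The `clip_skip` branch in `encode_prompt` above indexes into the encoder's hidden states rather than taking the final output. A hedged, self-contained sketch of the same indexing with the CLIP text encoder commonly paired with Stable Diffusion; the checkpoint name is an assumption.

```python
import torch
from transformers import CLIPTextModel, CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
enc = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

ids = tok(
    ["a photo of a cat"],
    padding="max_length",
    max_length=tok.model_max_length,
    truncation=True,
    return_tensors="pt",
).input_ids

with torch.no_grad():
    out = enc(ids, output_hidden_states=True)

clip_skip = 1
# hidden_states = (embedding output, layer 1, ..., layer N); -(clip_skip + 1) selects the
# penultimate layer, which is then passed through the final LayerNorm as in encode_prompt.
hidden = out.hidden_states[-(clip_skip + 1)]
hidden = enc.text_model.final_layer_norm(hidden)
print(hidden.shape)   # torch.Size([1, 77, 768])
```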
374
+ def run_safety_checker(self, image, device, dtype):
375
+ if self.safety_checker is None:
376
+ has_nsfw_concept = None
377
+ else:
378
+ if torch.is_tensor(image):
379
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
380
+ else:
381
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
382
+ rgb_feature_extractor_input = feature_extractor_input[0]
383
+ safety_checker_input = self.feature_extractor(rgb_feature_extractor_input, return_tensors="pt").to(device)
384
+ image, has_nsfw_concept = self.safety_checker(
385
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
386
+ )
387
+ return image, has_nsfw_concept
388
+
389
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
390
+ def prepare_extra_step_kwargs(self, generator, eta):
391
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
392
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
393
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
394
+ # and should be between [0, 1]
395
+
396
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
397
+ extra_step_kwargs = {}
398
+ if accepts_eta:
399
+ extra_step_kwargs["eta"] = eta
400
+
401
+ # check if the scheduler accepts generator
402
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
403
+ if accepts_generator:
404
+ extra_step_kwargs["generator"] = generator
405
+ return extra_step_kwargs
406
+
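`prepare_extra_step_kwargs` relies on signature inspection so the same call site works across schedulers whose `step()` methods accept different keywords. A generic, hedged sketch of that pattern; the helper and the two `step_*` functions are hypothetical.

```python
import inspect

def filter_kwargs(fn, **candidates):
    """Keep only the keyword arguments that `fn` actually accepts."""
    accepted = set(inspect.signature(fn).parameters.keys())
    return {k: v for k, v in candidates.items() if k in accepted}

def step_a(sample, eta=0.0, generator=None):   # accepts eta and generator
    return sample

def step_b(sample):                            # accepts neither
    return sample

print(filter_kwargs(step_a, eta=0.5, generator=None))   # {'eta': 0.5, 'generator': None}
print(filter_kwargs(step_b, eta=0.5, generator=None))   # {}
```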
407
+ def check_inputs(
408
+ self,
409
+ prompt,
410
+ image,
411
+ noise_level,
412
+ callback_steps,
413
+ negative_prompt=None,
414
+ prompt_embeds=None,
415
+ negative_prompt_embeds=None,
416
+ target_res=None,
417
+ ):
418
+ if (callback_steps is None) or (
419
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
420
+ ):
421
+ raise ValueError(
422
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
423
+ f" {type(callback_steps)}."
424
+ )
425
+
426
+ if prompt is not None and prompt_embeds is not None:
427
+ raise ValueError(
428
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
429
+ " only forward one of the two."
430
+ )
431
+ elif prompt is None and prompt_embeds is None:
432
+ raise ValueError(
433
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
434
+ )
435
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
436
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
437
+
438
+ if negative_prompt is not None and negative_prompt_embeds is not None:
439
+ raise ValueError(
440
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
441
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
442
+ )
443
+
444
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
445
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
446
+ raise ValueError(
447
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
448
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
449
+ f" {negative_prompt_embeds.shape}."
450
+ )
451
+
452
+ if (
453
+ not isinstance(image, torch.Tensor)
454
+ and not isinstance(image, PIL.Image.Image)
455
+ and not isinstance(image, np.ndarray)
456
+ and not isinstance(image, list)
457
+ ):
458
+ raise ValueError(
459
+ f"`image` has to be of type `torch.Tensor`, `np.ndarray`, `PIL.Image.Image` or `list` but is {type(image)}"
460
+ )
461
+
462
+ # verify batch size of prompt and image are same if image is a list or tensor or numpy array
463
+ if isinstance(image, list) or isinstance(image, torch.Tensor) or isinstance(image, np.ndarray):
464
+ if prompt is not None and isinstance(prompt, str):
465
+ batch_size = 1
466
+ elif prompt is not None and isinstance(prompt, list):
467
+ batch_size = len(prompt)
468
+ else:
469
+ batch_size = prompt_embeds.shape[0]
470
+
471
+ if isinstance(image, list):
472
+ image_batch_size = len(image)
473
+ else:
474
+ image_batch_size = image.shape[0]
475
+ if batch_size != image_batch_size:
476
+ raise ValueError(
477
+ f"`prompt` has batch size {batch_size} and `image` has batch size {image_batch_size}."
478
+ " Please make sure that passed `prompt` matches the batch size of `image`."
479
+ )
480
+
481
+ # check noise level
482
+ if noise_level > self.config.max_noise_level:
483
+ raise ValueError(f"`noise_level` has to be <= {self.config.max_noise_level} but is {noise_level}")
484
+
485
+ if (callback_steps is None) or (
486
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
487
+ ):
488
+ raise ValueError(
489
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
490
+ f" {type(callback_steps)}."
491
+ )
492
+
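`noise_level` is validated against `max_noise_level` here because, as the class docstring above notes, `low_res_scheduler` adds initial noise to the low-resolution conditioning image later in `__call__` (outside this excerpt). A hedged sketch of that step with the public `DDPMScheduler.add_noise` API; the tensor shapes and the standalone scheduler construction are illustrative assumptions (in the real pipeline the scheduler comes from the loaded checkpoint).

```python
import torch
from diffusers import DDPMScheduler

low_res_scheduler = DDPMScheduler()                    # stand-in; normally pipe.low_res_scheduler

rgbd = torch.randn(1, 4, 128, 128)                     # hypothetical preprocessed low-res RGB-D batch
noise = torch.randn_like(rgbd)
noise_level = torch.tensor([20], dtype=torch.long)     # must be <= config.max_noise_level

noisy_rgbd = low_res_scheduler.add_noise(rgbd, noise, noise_level)
```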
493
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
494
+ shape = (batch_size, num_channels_latents, height, width)
495
+ if latents is None:
496
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
497
+ else:
498
+ if latents.shape != shape:
499
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
500
+ latents = latents.to(device)
501
+
502
+ # scale the initial noise by the standard deviation required by the scheduler
503
+ latents = latents * self.scheduler.init_noise_sigma
504
+ return latents
505
+
506
+ # def upcast_vae(self):
507
+ # dtype = self.vae.dtype
508
+ # self.vae.to(dtype=torch.float32)
509
+ # use_torch_2_0_or_xformers = isinstance(
510
+ # self.vae.decoder.mid_block.attentions[0].processor,
511
+ # (
512
+ # AttnProcessor2_0,
513
+ # XFormersAttnProcessor,
514
+ # LoRAXFormersAttnProcessor,
515
+ # LoRAAttnProcessor2_0,
516
+ # ),
517
+ # )
518
+ # # if xformers or torch_2_0 is used attention block does not need
519
+ # # to be in float32 which can save lots of memory
520
+ # if use_torch_2_0_or_xformers:
521
+ # self.vae.post_quant_conv.to(dtype)
522
+ # self.vae.decoder.conv_in.to(dtype)
523
+ # self.vae.decoder.mid_block.to(dtype)
524
+
525
+ @torch.no_grad()
526
+ def __call__(
527
+ self,
528
+ prompt: Union[str, List[str]] = None,
529
+ rgb: PipelineImageInput = None,
530
+ depth: PipelineDepthInput = None,
531
+ num_inference_steps: int = 75,
532
+ guidance_scale: float = 9.0,
533
+ noise_level: int = 20,
534
+ negative_prompt: Optional[Union[str, List[str]]] = None,
535
+ num_images_per_prompt: Optional[int] = 1,
536
+ eta: float = 0.0,
537
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
538
+ latents: Optional[torch.FloatTensor] = None,
539
+ prompt_embeds: Optional[torch.FloatTensor] = None,
540
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
541
+ output_type: Optional[str] = "pil",
542
+ return_dict: bool = True,
543
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
544
+ callback_steps: int = 1,
545
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
546
+ target_res: Optional[List[int]] = [1024, 1024],
547
+ ):
548
+ r"""
549
+ The call function to the pipeline for generation.
550
+
551
+ Args:
552
+ prompt (`str` or `List[str]`, *optional*):
553
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
554
+ rgb (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
555
+ `Image` or tensor representing the low-resolution RGB image batch to be upscaled; the matching `depth` input is expected alongside it.
556
+ num_inference_steps (`int`, *optional*, defaults to 75):
557
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
558
+ expense of slower inference.
559
+ guidance_scale (`float`, *optional*, defaults to 9.0):
560
+ A higher guidance scale value encourages the model to generate images closely linked to the text
561
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
562
+ negative_prompt (`str` or `List[str]`, *optional*):
563
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
564
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
565
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
566
+ The number of images to generate per prompt.
567
+ eta (`float`, *optional*, defaults to 0.0):
568
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
569
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
570
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
571
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
572
+ generation deterministic.
573
+ latents (`torch.FloatTensor`, *optional*):
574
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
575
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
576
+ tensor is generated by sampling using the supplied random `generator`.
577
+ prompt_embeds (`torch.FloatTensor`, *optional*):
578
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
579
+ provided, text embeddings are generated from the `prompt` input argument.
580
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
581
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
582
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
583
+ output_type (`str`, *optional*, defaults to `"pil"`):
584
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
585
+ return_dict (`bool`, *optional*, defaults to `True`):
586
+ Whether or not to return a [`LDM3DPipelineOutput`] instead of a
587
+ plain tuple.
588
+ callback (`Callable`, *optional*):
589
+ A function that calls every `callback_steps` steps during inference. The function is called with the
590
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
591
+ callback_steps (`int`, *optional*, defaults to 1):
592
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
593
+ every step.
594
+ cross_attention_kwargs (`dict`, *optional*):
595
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
596
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
597
+
598
+ Examples:
599
+
600
+ Returns:
601
+ [`LDM3DPipelineOutput`] or `tuple`:
602
+ If `return_dict` is `True`, [`LDM3DPipelineOutput`] is returned,
603
+ otherwise a `tuple` is returned where the first element is a tuple of the generated rgb and depth images and the
604
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
605
+ "not-safe-for-work" (nsfw) content.
606
+ """
607
+ # 1. Check inputs. Raise error if not correct
608
+ self.check_inputs(
609
+ prompt,
610
+ rgb,
611
+ noise_level,
612
+ callback_steps,
613
+ negative_prompt,
614
+ prompt_embeds,
615
+ negative_prompt_embeds,
616
+ )
617
+ # 2. Define call parameters
618
+ if prompt is not None and isinstance(prompt, str):
619
+ batch_size = 1
620
+ elif prompt is not None and isinstance(prompt, list):
621
+ batch_size = len(prompt)
622
+ else:
623
+ batch_size = prompt_embeds.shape[0]
624
+
625
+ device = self._execution_device
626
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
627
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
628
+ # corresponds to doing no classifier free guidance.
629
+ do_classifier_free_guidance = guidance_scale > 1.0
630
+
631
+ # 3. Encode input prompt
632
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
633
+ prompt,
634
+ device,
635
+ num_images_per_prompt,
636
+ do_classifier_free_guidance,
637
+ negative_prompt,
638
+ prompt_embeds=prompt_embeds,
639
+ negative_prompt_embeds=negative_prompt_embeds,
640
+ )
641
+ # For classifier free guidance, we need to do two forward passes.
642
+ # Here we concatenate the unconditional and text embeddings into a single batch
643
+ # to avoid doing two forward passes
644
+ if do_classifier_free_guidance:
645
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
646
+
647
+ # 4. Preprocess image
648
+ rgb, depth = self.image_processor.preprocess(rgb, depth, target_res=target_res)
649
+ rgb = rgb.to(dtype=prompt_embeds.dtype, device=device)
650
+ depth = depth.to(dtype=prompt_embeds.dtype, device=device)
651
+
652
+ # 5. set timesteps
653
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
654
+ timesteps = self.scheduler.timesteps
655
+
656
+ # 6. Encode low resolution image to latent space
657
+ image = torch.cat([rgb, depth], axis=1)
658
+ latent_space_image = self.vae.encode(image).latent_dist.sample(generator)
659
+ latent_space_image *= self.vae.scaling_factor
660
+ noise_level = torch.tensor([noise_level], dtype=torch.long, device=device)
661
+ # noise_rgb = randn_tensor(rgb.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
662
+ # rgb = self.low_res_scheduler.add_noise(rgb, noise_rgb, noise_level)
663
+ # noise_depth = randn_tensor(depth.shape, generator=generator, device=device, dtype=prompt_embeds.dtype)
664
+ # depth = self.low_res_scheduler.add_noise(depth, noise_depth, noise_level)
665
+
666
+ batch_multiplier = 2 if do_classifier_free_guidance else 1
667
+ latent_space_image = torch.cat([latent_space_image] * batch_multiplier * num_images_per_prompt)
668
+ noise_level = torch.cat([noise_level] * latent_space_image.shape[0])
669
+
670
+ # 7. Prepare latent variables
671
+ height, width = latent_space_image.shape[2:]
672
+ num_channels_latents = self.vae.config.latent_channels
673
+
674
+ latents = self.prepare_latents(
675
+ batch_size * num_images_per_prompt,
676
+ num_channels_latents,
677
+ height,
678
+ width,
679
+ prompt_embeds.dtype,
680
+ device,
681
+ generator,
682
+ latents,
683
+ )
684
+
685
+ # 8. Check that sizes of image and latents match
686
+ num_channels_image = latent_space_image.shape[1]
687
+ if num_channels_latents + num_channels_image != self.unet.config.in_channels:
688
+ raise ValueError(
689
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
690
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
691
+ f" `num_channels_image`: {num_channels_image} "
692
+ f" = {num_channels_latents+num_channels_image}. Please verify the config of"
693
+ " `pipeline.unet` or your `image` input."
694
+ )
695
+
696
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
697
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
698
+
699
+ # 10. Denoising loop
700
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
701
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
702
+ for i, t in enumerate(timesteps):
703
+ # expand the latents if we are doing classifier free guidance
704
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
705
+
706
+ # concat latents, mask, masked_image_latents in the channel dimension
707
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
708
+ latent_model_input = torch.cat([latent_model_input, latent_space_image], dim=1)
709
+
710
+ # predict the noise residual
711
+ noise_pred = self.unet(
712
+ latent_model_input,
713
+ t,
714
+ encoder_hidden_states=prompt_embeds,
715
+ cross_attention_kwargs=cross_attention_kwargs,
716
+ class_labels=noise_level,
717
+ return_dict=False,
718
+ )[0]
719
+
720
+ # perform guidance
721
+ if do_classifier_free_guidance:
722
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
723
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
724
+
725
+ # compute the previous noisy sample x_t -> x_t-1
726
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
727
+
728
+ # call the callback, if provided
729
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
730
+ progress_bar.update()
731
+ if callback is not None and i % callback_steps == 0:
732
+ callback(i, t, latents)
733
+
734
+ if not output_type == "latent":
735
+ # make sure the VAE is in float32 mode, as it overflows in float16
736
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
737
+
738
+ if needs_upcasting:
739
+ self.upcast_vae()
740
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
741
+
742
+ image = self.vae.decode(latents / self.vae.scaling_factor, return_dict=False)[0]
743
+
744
+ # cast back to fp16 if needed
745
+ if needs_upcasting:
746
+ self.vae.to(dtype=torch.float16)
747
+
748
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
749
+
750
+ else:
751
+ image = latents
752
+ has_nsfw_concept = None
753
+
754
+ if has_nsfw_concept is None:
755
+ do_denormalize = [True] * image.shape[0]
756
+ else:
757
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
758
+
759
+ rgb, depth = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
760
+
761
+ # 11. Apply watermark
762
+ if output_type == "pil" and self.watermarker is not None:
763
+ rgb = self.watermarker.apply_watermark(rgb)
764
+
765
+ # Offload last model to CPU
766
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
767
+ self.final_offload_hook.offload()
768
+
769
+ if not return_dict:
770
+ return ((rgb, depth), has_nsfw_concept)
771
+
772
+ return LDM3DPipelineOutput(rgb=rgb, depth=depth, nsfw_content_detected=has_nsfw_concept)
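
A minimal usage sketch for the upscaler defined above (an editorial illustration, not part of the committed file). It assumes the community pipeline is loaded by its filename through `custom_pipeline`, and that the `Intel/ldm3d-4c` base and `Intel/ldm3d-sr` super-resolution checkpoints are available; these repository ids and the exact call settings are assumptions to adapt to your setup.

```py
import torch
from diffusers import DiffusionPipeline, StableDiffusionLDM3DPipeline

# 1. Produce a low-resolution rgb + depth pair with the base LDM3D pipeline.
base = StableDiffusionLDM3DPipeline.from_pretrained(
    "Intel/ldm3d-4c", torch_dtype=torch.float16  # assumed base checkpoint id
).to("cuda")
out = base("a picture of some lemons on a table", width=512, height=512)
low_res_rgb, low_res_depth = out.rgb[0], out.depth[0]

# 2. Upscale both modalities with the community pipeline from this file.
upscaler = DiffusionPipeline.from_pretrained(
    "Intel/ldm3d-sr",  # assumed super-resolution checkpoint id
    custom_pipeline="pipeline_stable_diffusion_upscale_ldm3d",
    torch_dtype=torch.float16,
).to("cuda")

upscaled = upscaler(
    prompt="high quality high resolution uhd 4k image",
    rgb=low_res_rgb,
    depth=low_res_depth,
    num_inference_steps=50,
    target_res=[1024, 1024],
)
upscaled.rgb[0].save("upscaled_rgb.png")
upscaled.depth[0].save("upscaled_depth.png")
```
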
v0.26.3/pipeline_stable_diffusion_xl_controlnet_adapter.py ADDED
@@ -0,0 +1,1463 @@
1
+ # Copyright 2023 TencentARC and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
24
+
25
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
26
+ from diffusers.loaders import FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
27
+ from diffusers.models import AutoencoderKL, ControlNetModel, MultiAdapter, T2IAdapter, UNet2DConditionModel
28
+ from diffusers.models.attention_processor import (
29
+ AttnProcessor2_0,
30
+ LoRAAttnProcessor2_0,
31
+ LoRAXFormersAttnProcessor,
32
+ XFormersAttnProcessor,
33
+ )
34
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
35
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
36
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
37
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
38
+ from diffusers.schedulers import KarrasDiffusionSchedulers
39
+ from diffusers.utils import (
40
+ PIL_INTERPOLATION,
41
+ USE_PEFT_BACKEND,
42
+ logging,
43
+ replace_example_docstring,
44
+ scale_lora_layers,
45
+ unscale_lora_layers,
46
+ )
47
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
48
+
49
+
50
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
51
+
52
+ EXAMPLE_DOC_STRING = """
53
+ Examples:
54
+ ```py
55
+ >>> import torch
56
+ >>> from diffusers import T2IAdapter, StableDiffusionXLAdapterPipeline, DDPMScheduler
57
+ >>> from diffusers.utils import load_image
58
+ >>> from controlnet_aux.midas import MidasDetector
59
+
60
+ >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
61
+ >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
62
+
63
+ >>> image = load_image(img_url).resize((1024, 1024))
64
+ >>> mask_image = load_image(mask_url).resize((1024, 1024))
65
+
66
+ >>> midas_depth = MidasDetector.from_pretrained(
67
+ ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
68
+ ... ).to("cuda")
69
+
70
+ >>> depth_image = midas_depth(
71
+ ... image, detect_resolution=512, image_resolution=1024
72
+ ... )
73
+
74
+ >>> model_id = "stabilityai/stable-diffusion-xl-base-1.0"
75
+
76
+ >>> adapter = T2IAdapter.from_pretrained(
77
+ ... "Adapter/t2iadapter",
78
+ ... subfolder="sketch_sdxl_1.0",
79
+ ... torch_dtype=torch.float16,
80
+ ... adapter_type="full_adapter_xl",
81
+ ... )
82
+
83
+ >>> controlnet = ControlNetModel.from_pretrained(
84
+ ... "diffusers/controlnet-depth-sdxl-1.0",
85
+ ... torch_dtype=torch.float16,
86
+ ... variant="fp16",
87
+ ... use_safetensors=True
88
+ ... ).to("cuda")
89
+
90
+ >>> scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")
91
+
92
+ >>> pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
93
+ ... model_id,
94
+ ... adapter=adapter,
95
+ ... controlnet=controlnet,
96
+ ... torch_dtype=torch.float16,
97
+ ... variant="fp16",
98
+ ... scheduler=scheduler
99
+ ... ).to("cuda")
100
+
101
+ >>> strength = 0.5
102
+
103
+ >>> generator = torch.manual_seed(42)
104
+ >>> sketch_image_out = pipe(
105
+ ... prompt="a photo of a tiger sitting on a park bench",
106
+ ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
107
+ ... adapter_image=depth_image,
108
+ ... control_image=mask_image,
109
+ ... adapter_conditioning_scale=strength,
110
+ ... controlnet_conditioning_scale=strength,
111
+ ... generator=generator,
112
+ ... guidance_scale=7.5,
113
+ ... ).images[0]
114
+ ```
115
+ """
116
+
117
+
118
+ def _preprocess_adapter_image(image, height, width):
119
+ if isinstance(image, torch.Tensor):
120
+ return image
121
+ elif isinstance(image, PIL.Image.Image):
122
+ image = [image]
123
+
124
+ if isinstance(image[0], PIL.Image.Image):
125
+ image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
126
+ image = [
127
+ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
128
+ ] # expand [h, w] or [h, w, c] to [b, h, w, c]
129
+ image = np.concatenate(image, axis=0)
130
+ image = np.array(image).astype(np.float32) / 255.0
131
+ image = image.transpose(0, 3, 1, 2)
132
+ image = torch.from_numpy(image)
133
+ elif isinstance(image[0], torch.Tensor):
134
+ if image[0].ndim == 3:
135
+ image = torch.stack(image, dim=0)
136
+ elif image[0].ndim == 4:
137
+ image = torch.cat(image, dim=0)
138
+ else:
139
+ raise ValueError(
140
+ f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
141
+ )
142
+ return image
143
+
144
+
145
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
146
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
147
+ """
148
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
149
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
150
+ """
151
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
152
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
153
+ # rescale the results from guidance (fixes overexposure)
154
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
155
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
156
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
157
+ return noise_cfg
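
Stated as an equation (ad-hoc notation, not from the source: φ is `guidance_rescale` and σ(·) is the per-sample standard deviation taken over all non-batch dimensions), the function above computes

$$
\hat{\epsilon}_{\mathrm{rescaled}} = \hat{\epsilon}_{\mathrm{cfg}} \cdot \frac{\sigma(\epsilon_{\mathrm{text}})}{\sigma(\hat{\epsilon}_{\mathrm{cfg}})},
\qquad
\hat{\epsilon} = \phi\,\hat{\epsilon}_{\mathrm{rescaled}} + (1 - \phi)\,\hat{\epsilon}_{\mathrm{cfg}},
$$

which corresponds to Section 3.4 of the paper referenced in the docstring.
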
158
+
159
+
160
+ class StableDiffusionXLControlNetAdapterPipeline(
161
+ DiffusionPipeline, FromSingleFileMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin
162
+ ):
163
+ r"""
164
+ Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter
165
+ https://arxiv.org/abs/2302.08453
166
+
167
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
168
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
169
+
170
+ Args:
171
+ adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
172
+ Provides additional conditioning to the unet during the denoising process. If you set multiple Adapters as a
173
+ list, the outputs from each Adapter are added together to create one combined additional conditioning.
174
+ adapter_weights (`List[float]`, *optional*, defaults to None):
175
+ List of floats representing the weight which will be multiplied with each adapter's output before adding them
176
+ together.
177
+ vae ([`AutoencoderKL`]):
178
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
179
+ text_encoder ([`CLIPTextModel`]):
180
+ Frozen text-encoder. Stable Diffusion uses the text portion of
181
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
182
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
183
+ tokenizer (`CLIPTokenizer`):
184
+ Tokenizer of class
185
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
186
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
187
+ scheduler ([`SchedulerMixin`]):
188
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
189
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
190
+ safety_checker ([`StableDiffusionSafetyChecker`]):
191
+ Classification module that estimates whether generated images could be considered offensive or harmful.
192
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
193
+ feature_extractor ([`CLIPFeatureExtractor`]):
194
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
195
+ """
196
+
197
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
198
+ _optional_components = ["tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2"]
199
+
200
+ def __init__(
201
+ self,
202
+ vae: AutoencoderKL,
203
+ text_encoder: CLIPTextModel,
204
+ text_encoder_2: CLIPTextModelWithProjection,
205
+ tokenizer: CLIPTokenizer,
206
+ tokenizer_2: CLIPTokenizer,
207
+ unet: UNet2DConditionModel,
208
+ adapter: Union[T2IAdapter, MultiAdapter, List[T2IAdapter]],
209
+ controlnet: Union[ControlNetModel, MultiControlNetModel],
210
+ scheduler: KarrasDiffusionSchedulers,
211
+ force_zeros_for_empty_prompt: bool = True,
212
+ ):
213
+ super().__init__()
214
+
215
+ if isinstance(controlnet, (list, tuple)):
216
+ controlnet = MultiControlNetModel(controlnet)
217
+
218
+ self.register_modules(
219
+ vae=vae,
220
+ text_encoder=text_encoder,
221
+ text_encoder_2=text_encoder_2,
222
+ tokenizer=tokenizer,
223
+ tokenizer_2=tokenizer_2,
224
+ unet=unet,
225
+ adapter=adapter,
226
+ controlnet=controlnet,
227
+ scheduler=scheduler,
228
+ )
229
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
230
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
231
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
232
+ self.control_image_processor = VaeImageProcessor(
233
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
234
+ )
235
+ self.default_sample_size = self.unet.config.sample_size
236
+
237
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
238
+ def enable_vae_slicing(self):
239
+ r"""
240
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
241
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
242
+ """
243
+ self.vae.enable_slicing()
244
+
245
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
246
+ def disable_vae_slicing(self):
247
+ r"""
248
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
249
+ computing decoding in one step.
250
+ """
251
+ self.vae.disable_slicing()
252
+
253
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
254
+ def enable_vae_tiling(self):
255
+ r"""
256
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
257
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
258
+ processing larger images.
259
+ """
260
+ self.vae.enable_tiling()
261
+
262
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
263
+ def disable_vae_tiling(self):
264
+ r"""
265
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
266
+ computing decoding in one step.
267
+ """
268
+ self.vae.disable_tiling()
269
+
270
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
271
+ def encode_prompt(
272
+ self,
273
+ prompt: str,
274
+ prompt_2: Optional[str] = None,
275
+ device: Optional[torch.device] = None,
276
+ num_images_per_prompt: int = 1,
277
+ do_classifier_free_guidance: bool = True,
278
+ negative_prompt: Optional[str] = None,
279
+ negative_prompt_2: Optional[str] = None,
280
+ prompt_embeds: Optional[torch.FloatTensor] = None,
281
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
282
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
283
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
284
+ lora_scale: Optional[float] = None,
285
+ clip_skip: Optional[int] = None,
286
+ ):
287
+ r"""
288
+ Encodes the prompt into text encoder hidden states.
289
+
290
+ Args:
291
+ prompt (`str` or `List[str]`, *optional*):
292
+ prompt to be encoded
293
+ prompt_2 (`str` or `List[str]`, *optional*):
294
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
295
+ used in both text-encoders
296
+ device: (`torch.device`):
297
+ torch device
298
+ num_images_per_prompt (`int`):
299
+ number of images that should be generated per prompt
300
+ do_classifier_free_guidance (`bool`):
301
+ whether to use classifier free guidance or not
302
+ negative_prompt (`str` or `List[str]`, *optional*):
303
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
304
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
305
+ less than `1`).
306
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
307
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
308
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
309
+ prompt_embeds (`torch.FloatTensor`, *optional*):
310
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
311
+ provided, text embeddings will be generated from `prompt` input argument.
312
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
313
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
314
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
315
+ argument.
316
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
317
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
318
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
319
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
320
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
321
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
322
+ input argument.
323
+ lora_scale (`float`, *optional*):
324
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
325
+ clip_skip (`int`, *optional*):
326
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
327
+ the output of the pre-final layer will be used for computing the prompt embeddings.
328
+ """
329
+ device = device or self._execution_device
330
+
331
+ # set lora scale so that monkey patched LoRA
332
+ # function of text encoder can correctly access it
333
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
334
+ self._lora_scale = lora_scale
335
+
336
+ # dynamically adjust the LoRA scale
337
+ if self.text_encoder is not None:
338
+ if not USE_PEFT_BACKEND:
339
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
340
+ else:
341
+ scale_lora_layers(self.text_encoder, lora_scale)
342
+
343
+ if self.text_encoder_2 is not None:
344
+ if not USE_PEFT_BACKEND:
345
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
346
+ else:
347
+ scale_lora_layers(self.text_encoder_2, lora_scale)
348
+
349
+ prompt = [prompt] if isinstance(prompt, str) else prompt
350
+
351
+ if prompt is not None:
352
+ batch_size = len(prompt)
353
+ else:
354
+ batch_size = prompt_embeds.shape[0]
355
+
356
+ # Define tokenizers and text encoders
357
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
358
+ text_encoders = (
359
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
360
+ )
361
+
362
+ if prompt_embeds is None:
363
+ prompt_2 = prompt_2 or prompt
364
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
365
+
366
+ # textual inversion: process multi-vector tokens if necessary
367
+ prompt_embeds_list = []
368
+ prompts = [prompt, prompt_2]
369
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
370
+ if isinstance(self, TextualInversionLoaderMixin):
371
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
372
+
373
+ text_inputs = tokenizer(
374
+ prompt,
375
+ padding="max_length",
376
+ max_length=tokenizer.model_max_length,
377
+ truncation=True,
378
+ return_tensors="pt",
379
+ )
380
+
381
+ text_input_ids = text_inputs.input_ids
382
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
383
+
384
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
385
+ text_input_ids, untruncated_ids
386
+ ):
387
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
388
+ logger.warning(
389
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
390
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
391
+ )
392
+
393
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
394
+
395
+ # We are only ALWAYS interested in the pooled output of the final text encoder
396
+ pooled_prompt_embeds = prompt_embeds[0]
397
+ if clip_skip is None:
398
+ prompt_embeds = prompt_embeds.hidden_states[-2]
399
+ else:
400
+ # "2" because SDXL always indexes from the penultimate layer.
401
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
402
+
403
+ prompt_embeds_list.append(prompt_embeds)
404
+
405
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
406
+
407
+ # get unconditional embeddings for classifier free guidance
408
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
409
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
410
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
411
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
412
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
413
+ negative_prompt = negative_prompt or ""
414
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
415
+
416
+ # normalize str to list
417
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
418
+ negative_prompt_2 = (
419
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
420
+ )
421
+
422
+ uncond_tokens: List[str]
423
+ if prompt is not None and type(prompt) is not type(negative_prompt):
424
+ raise TypeError(
425
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
426
+ f" {type(prompt)}."
427
+ )
428
+ elif batch_size != len(negative_prompt):
429
+ raise ValueError(
430
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
431
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
432
+ " the batch size of `prompt`."
433
+ )
434
+ else:
435
+ uncond_tokens = [negative_prompt, negative_prompt_2]
436
+
437
+ negative_prompt_embeds_list = []
438
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
439
+ if isinstance(self, TextualInversionLoaderMixin):
440
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
441
+
442
+ max_length = prompt_embeds.shape[1]
443
+ uncond_input = tokenizer(
444
+ negative_prompt,
445
+ padding="max_length",
446
+ max_length=max_length,
447
+ truncation=True,
448
+ return_tensors="pt",
449
+ )
450
+
451
+ negative_prompt_embeds = text_encoder(
452
+ uncond_input.input_ids.to(device),
453
+ output_hidden_states=True,
454
+ )
455
+ # We are only ALWAYS interested in the pooled output of the final text encoder
456
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
457
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
458
+
459
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
460
+
461
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
462
+
463
+ if self.text_encoder_2 is not None:
464
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
465
+ else:
466
+ prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
467
+
468
+ bs_embed, seq_len, _ = prompt_embeds.shape
469
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
470
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
471
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
472
+
473
+ if do_classifier_free_guidance:
474
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
475
+ seq_len = negative_prompt_embeds.shape[1]
476
+
477
+ if self.text_encoder_2 is not None:
478
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
479
+ else:
480
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
481
+
482
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
483
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
484
+
485
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
486
+ bs_embed * num_images_per_prompt, -1
487
+ )
488
+ if do_classifier_free_guidance:
489
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
490
+ bs_embed * num_images_per_prompt, -1
491
+ )
492
+
493
+ if self.text_encoder is not None:
494
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
495
+ # Retrieve the original scale by scaling back the LoRA layers
496
+ unscale_lora_layers(self.text_encoder, lora_scale)
497
+
498
+ if self.text_encoder_2 is not None:
499
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
500
+ # Retrieve the original scale by scaling back the LoRA layers
501
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
502
+
503
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
504
+
505
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
506
+ def prepare_extra_step_kwargs(self, generator, eta):
507
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
508
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
509
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
510
+ # and should be between [0, 1]
511
+
512
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
513
+ extra_step_kwargs = {}
514
+ if accepts_eta:
515
+ extra_step_kwargs["eta"] = eta
516
+
517
+ # check if the scheduler accepts generator
518
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
519
+ if accepts_generator:
520
+ extra_step_kwargs["generator"] = generator
521
+ return extra_step_kwargs
522
+
523
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
524
+ def check_image(self, image, prompt, prompt_embeds):
525
+ image_is_pil = isinstance(image, PIL.Image.Image)
526
+ image_is_tensor = isinstance(image, torch.Tensor)
527
+ image_is_np = isinstance(image, np.ndarray)
528
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
529
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
530
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
531
+
532
+ if (
533
+ not image_is_pil
534
+ and not image_is_tensor
535
+ and not image_is_np
536
+ and not image_is_pil_list
537
+ and not image_is_tensor_list
538
+ and not image_is_np_list
539
+ ):
540
+ raise TypeError(
541
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
542
+ )
543
+
544
+ if image_is_pil:
545
+ image_batch_size = 1
546
+ else:
547
+ image_batch_size = len(image)
548
+
549
+ if prompt is not None and isinstance(prompt, str):
550
+ prompt_batch_size = 1
551
+ elif prompt is not None and isinstance(prompt, list):
552
+ prompt_batch_size = len(prompt)
553
+ elif prompt_embeds is not None:
554
+ prompt_batch_size = prompt_embeds.shape[0]
555
+
556
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
557
+ raise ValueError(
558
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
559
+ )
560
+
561
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
562
+ def check_inputs(
563
+ self,
564
+ prompt,
565
+ prompt_2,
566
+ height,
567
+ width,
568
+ callback_steps,
569
+ negative_prompt=None,
570
+ negative_prompt_2=None,
571
+ prompt_embeds=None,
572
+ negative_prompt_embeds=None,
573
+ pooled_prompt_embeds=None,
574
+ negative_pooled_prompt_embeds=None,
575
+ callback_on_step_end_tensor_inputs=None,
576
+ ):
577
+ if height % 8 != 0 or width % 8 != 0:
578
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
579
+
580
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
581
+ raise ValueError(
582
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
583
+ f" {type(callback_steps)}."
584
+ )
585
+
586
+ if callback_on_step_end_tensor_inputs is not None and not all(
587
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
588
+ ):
589
+ raise ValueError(
590
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
591
+ )
592
+
593
+ if prompt is not None and prompt_embeds is not None:
594
+ raise ValueError(
595
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
596
+ " only forward one of the two."
597
+ )
598
+ elif prompt_2 is not None and prompt_embeds is not None:
599
+ raise ValueError(
600
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
601
+ " only forward one of the two."
602
+ )
603
+ elif prompt is None and prompt_embeds is None:
604
+ raise ValueError(
605
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
606
+ )
607
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
608
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
609
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
610
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
611
+
612
+ if negative_prompt is not None and negative_prompt_embeds is not None:
613
+ raise ValueError(
614
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
615
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
616
+ )
617
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
618
+ raise ValueError(
619
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
620
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
621
+ )
622
+
623
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
624
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
625
+ raise ValueError(
626
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
627
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
628
+ f" {negative_prompt_embeds.shape}."
629
+ )
630
+
631
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
632
+ raise ValueError(
633
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
634
+ )
635
+
636
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
637
+ raise ValueError(
638
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
639
+ )
640
+
641
+ def check_conditions(
642
+ self,
643
+ prompt,
644
+ prompt_embeds,
645
+ adapter_image,
646
+ control_image,
647
+ adapter_conditioning_scale,
648
+ controlnet_conditioning_scale,
649
+ control_guidance_start,
650
+ control_guidance_end,
651
+ ):
652
+ # controlnet checks
653
+ if not isinstance(control_guidance_start, (tuple, list)):
654
+ control_guidance_start = [control_guidance_start]
655
+
656
+ if not isinstance(control_guidance_end, (tuple, list)):
657
+ control_guidance_end = [control_guidance_end]
658
+
659
+ if len(control_guidance_start) != len(control_guidance_end):
660
+ raise ValueError(
661
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
662
+ )
663
+
664
+ if isinstance(self.controlnet, MultiControlNetModel):
665
+ if len(control_guidance_start) != len(self.controlnet.nets):
666
+ raise ValueError(
667
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
668
+ )
669
+
670
+ for start, end in zip(control_guidance_start, control_guidance_end):
671
+ if start >= end:
672
+ raise ValueError(
673
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
674
+ )
675
+ if start < 0.0:
676
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
677
+ if end > 1.0:
678
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
679
+
680
+ # Check controlnet `image`
681
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
682
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
683
+ )
684
+ if (
685
+ isinstance(self.controlnet, ControlNetModel)
686
+ or is_compiled
687
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
688
+ ):
689
+ self.check_image(control_image, prompt, prompt_embeds)
690
+ elif (
691
+ isinstance(self.controlnet, MultiControlNetModel)
692
+ or is_compiled
693
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
694
+ ):
695
+ if not isinstance(control_image, list):
696
+ raise TypeError("For multiple controlnets: `control_image` must be type `list`")
697
+
698
+ # When `image` is a nested list:
699
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
700
+ elif any(isinstance(i, list) for i in control_image):
701
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
702
+ elif len(control_image) != len(self.controlnet.nets):
703
+ raise ValueError(
704
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
705
+ )
706
+
707
+ for image_ in control_image:
708
+ self.check_image(image_, prompt, prompt_embeds)
709
+ else:
710
+ assert False
711
+
712
+ # Check `controlnet_conditioning_scale`
713
+ if (
714
+ isinstance(self.controlnet, ControlNetModel)
715
+ or is_compiled
716
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
717
+ ):
718
+ if not isinstance(controlnet_conditioning_scale, float):
719
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
720
+ elif (
721
+ isinstance(self.controlnet, MultiControlNetModel)
722
+ or is_compiled
723
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
724
+ ):
725
+ if isinstance(controlnet_conditioning_scale, list):
726
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
727
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
728
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
729
+ self.controlnet.nets
730
+ ):
731
+ raise ValueError(
732
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
733
+ " the same length as the number of controlnets"
734
+ )
735
+ else:
736
+ assert False
737
+
738
+ # adapter checks
739
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
740
+ self.check_image(adapter_image, prompt, prompt_embeds)
741
+ elif (
742
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
743
+ ):
744
+ if not isinstance(adapter_image, list):
745
+ raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
746
+
747
+ # When `image` is a nested list:
748
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
749
+ elif any(isinstance(i, list) for i in adapter_image):
750
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
751
+ elif len(adapter_image) != len(self.adapter.adapters):
752
+ raise ValueError(
753
+ f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapters.nets)} Adapters."
754
+ )
755
+
756
+ for image_ in adapter_image:
757
+ self.check_image(image_, prompt, prompt_embeds)
758
+ else:
759
+ assert False
760
+
761
+ # Check `adapter_conditioning_scale`
762
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
763
+ if not isinstance(adapter_conditioning_scale, float):
764
+ raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
765
+ elif (
766
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
767
+ ):
768
+ if isinstance(adapter_conditioning_scale, list):
769
+ if any(isinstance(i, list) for i in adapter_conditioning_scale):
770
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
771
+ elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
772
+ self.adapter.adapters
773
+ ):
774
+ raise ValueError(
775
+ "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
776
+ " the same length as the number of adapters"
777
+ )
778
+ else:
779
+ assert False
780
+
781
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
782
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
783
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
784
+ if isinstance(generator, list) and len(generator) != batch_size:
785
+ raise ValueError(
786
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
787
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
788
+ )
789
+
790
+ if latents is None:
791
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
792
+ else:
793
+ latents = latents.to(device)
794
+
795
+ # scale the initial noise by the standard deviation required by the scheduler
796
+ latents = latents * self.scheduler.init_noise_sigma
797
+ return latents
798
+
799
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
800
+ def _get_add_time_ids(
801
+ self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
802
+ ):
803
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
804
+
805
+ passed_add_embed_dim = (
806
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
807
+ )
808
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
809
+
810
+ if expected_add_embed_dim != passed_add_embed_dim:
811
+ raise ValueError(
812
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
813
+ )
814
+
815
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
816
+ return add_time_ids
817
+
818
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
819
+ def upcast_vae(self):
820
+ dtype = self.vae.dtype
821
+ self.vae.to(dtype=torch.float32)
822
+ use_torch_2_0_or_xformers = isinstance(
823
+ self.vae.decoder.mid_block.attentions[0].processor,
824
+ (
825
+ AttnProcessor2_0,
826
+ XFormersAttnProcessor,
827
+ LoRAXFormersAttnProcessor,
828
+ LoRAAttnProcessor2_0,
829
+ ),
830
+ )
831
+ # if xformers or torch_2_0 is used attention block does not need
832
+ # to be in float32 which can save lots of memory
833
+ if use_torch_2_0_or_xformers:
834
+ self.vae.post_quant_conv.to(dtype)
835
+ self.vae.decoder.conv_in.to(dtype)
836
+ self.vae.decoder.mid_block.to(dtype)
837
+
838
+ # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
839
+ def _default_height_width(self, height, width, image):
840
+ # NOTE: It is possible that the images in a list have different
841
+ # dimensions, so just checking the first image
842
+ # is not _exactly_ correct, but it is simple.
843
+ while isinstance(image, list):
844
+ image = image[0]
845
+
846
+ if height is None:
847
+ if isinstance(image, PIL.Image.Image):
848
+ height = image.height
849
+ elif isinstance(image, torch.Tensor):
850
+ height = image.shape[-2]
851
+
852
+ # round down to nearest multiple of `self.adapter.downscale_factor`
853
+ height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
854
+
855
+ if width is None:
856
+ if isinstance(image, PIL.Image.Image):
857
+ width = image.width
858
+ elif isinstance(image, torch.Tensor):
859
+ width = image.shape[-1]
860
+
861
+ # round down to nearest multiple of `self.adapter.downscale_factor`
862
+ width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
863
+
864
+ return height, width
865
+
866
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
867
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
868
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
869
+
870
+ The suffixes after the scaling factors represent the stages where they are being applied.
871
+
872
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
873
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
874
+
875
+ Args:
876
+ s1 (`float`):
877
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
878
+ mitigate "oversmoothing effect" in the enhanced denoising process.
879
+ s2 (`float`):
880
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
881
+ mitigate "oversmoothing effect" in the enhanced denoising process.
882
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
883
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
884
+ """
885
+ if not hasattr(self, "unet"):
886
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
887
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
888
+
889
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
890
+ def disable_freeu(self):
891
+ """Disables the FreeU mechanism if enabled."""
892
+ self.unet.disable_freeu()
893
+
894
+ def prepare_control_image(
895
+ self,
896
+ image,
897
+ width,
898
+ height,
899
+ batch_size,
900
+ num_images_per_prompt,
901
+ device,
902
+ dtype,
903
+ do_classifier_free_guidance=False,
904
+ guess_mode=False,
905
+ ):
906
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
907
+ image_batch_size = image.shape[0]
908
+
909
+ if image_batch_size == 1:
910
+ repeat_by = batch_size
911
+ else:
912
+ # image batch size is the same as prompt batch size
913
+ repeat_by = num_images_per_prompt
914
+
915
+ image = image.repeat_interleave(repeat_by, dim=0)
916
+
917
+ image = image.to(device=device, dtype=dtype)
918
+
919
+ if do_classifier_free_guidance and not guess_mode:
920
+ image = torch.cat([image] * 2)
921
+
922
+ return image
923
+
924
+ @torch.no_grad()
925
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
926
+ def __call__(
927
+ self,
928
+ prompt: Union[str, List[str]] = None,
929
+ prompt_2: Optional[Union[str, List[str]]] = None,
930
+ adapter_image: PipelineImageInput = None,
931
+ control_image: PipelineImageInput = None,
932
+ height: Optional[int] = None,
933
+ width: Optional[int] = None,
934
+ num_inference_steps: int = 50,
935
+ denoising_end: Optional[float] = None,
936
+ guidance_scale: float = 5.0,
937
+ negative_prompt: Optional[Union[str, List[str]]] = None,
938
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
939
+ num_images_per_prompt: Optional[int] = 1,
940
+ eta: float = 0.0,
941
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
942
+ latents: Optional[torch.FloatTensor] = None,
943
+ prompt_embeds: Optional[torch.FloatTensor] = None,
944
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
945
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
946
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
947
+ output_type: Optional[str] = "pil",
948
+ return_dict: bool = True,
949
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
950
+ callback_steps: int = 1,
951
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
952
+ guidance_rescale: float = 0.0,
953
+ original_size: Optional[Tuple[int, int]] = None,
954
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
955
+ target_size: Optional[Tuple[int, int]] = None,
956
+ negative_original_size: Optional[Tuple[int, int]] = None,
957
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
958
+ negative_target_size: Optional[Tuple[int, int]] = None,
959
+ adapter_conditioning_scale: Union[float, List[float]] = 1.0,
960
+ adapter_conditioning_factor: float = 1.0,
961
+ clip_skip: Optional[int] = None,
962
+ controlnet_conditioning_scale=1.0,
963
+ guess_mode: bool = False,
964
+ control_guidance_start: float = 0.0,
965
+ control_guidance_end: float = 1.0,
966
+ ):
967
+ r"""
968
+ Function invoked when calling the pipeline for generation.
969
+
970
+ Args:
971
+ prompt (`str` or `List[str]`, *optional*):
972
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
973
+ instead.
974
+ prompt_2 (`str` or `List[str]`, *optional*):
975
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
976
+ used in both text-encoders
977
+ adapter_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
978
+ The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
979
+ type is specified as `Torch.FloatTensor`, it is passed to Adapter as is. PIL.Image.Image` can also be
980
+ accepted as an image. The control image is automatically resized to fit the output image.
981
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
982
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
983
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
984
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
985
+ accepted as an image. The dimensions of the output image default to `image`'s dimensions. If height
986
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
987
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
988
+ input to a single ControlNet.
989
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
990
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
991
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
992
+ and checkpoints that are not specifically fine-tuned on low resolutions.
993
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
994
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
995
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
996
+ and checkpoints that are not specifically fine-tuned on low resolutions.
997
+ num_inference_steps (`int`, *optional*, defaults to 50):
998
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
999
+ expense of slower inference.
1000
+ denoising_end (`float`, *optional*):
1001
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1002
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1003
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
1004
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
1005
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1006
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
1007
+ guidance_scale (`float`, *optional*, defaults to 5.0):
1008
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1009
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1010
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1011
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1012
+ usually at the expense of lower image quality.
1013
+ negative_prompt (`str` or `List[str]`, *optional*):
1014
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1015
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1016
+ less than `1`).
1017
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1018
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1019
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1020
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1021
+ The number of images to generate per prompt.
1022
+ eta (`float`, *optional*, defaults to 0.0):
1023
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1024
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1025
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1026
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1027
+ to make generation deterministic.
1028
+ latents (`torch.FloatTensor`, *optional*):
1029
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1030
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1031
+ tensor will be generated by sampling using the supplied random `generator`.
1032
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1033
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1034
+ provided, text embeddings will be generated from `prompt` input argument.
1035
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1036
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1037
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1038
+ argument.
1039
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1040
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1041
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1042
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1043
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1044
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1045
+ input argument.
1046
+ output_type (`str`, *optional*, defaults to `"pil"`):
1047
+ The output format of the generated image. Choose between
1048
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1049
+ return_dict (`bool`, *optional*, defaults to `True`):
1050
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`]
1051
+ instead of a plain tuple.
1052
+ callback (`Callable`, *optional*):
1053
+ A function that will be called every `callback_steps` steps during inference. The function will be
1054
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1055
+ callback_steps (`int`, *optional*, defaults to 1):
1056
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1057
+ called at every step.
1058
+ cross_attention_kwargs (`dict`, *optional*):
1059
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1060
+ `self.processor` in
1061
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1062
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
1063
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1064
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
1065
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1066
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
1067
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1068
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1069
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
1070
+ explained in section 2.2 of
1071
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1072
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1073
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1074
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1075
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1076
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1077
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1078
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1079
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
1080
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1082
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1083
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
1084
+ micro-conditioning as explained in section 2.2 of
1085
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1086
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1087
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1088
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
1089
+ micro-conditioning as explained in section 2.2 of
1090
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1091
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1092
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1093
+ To negatively condition the generation process based on a target image resolution. It should be the same
1094
+ as the `target_size` in most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
1095
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
1096
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
1097
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1098
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the
1099
+ residual in the original unet. If multiple ControlNets are specified in init, you can set the
1100
+ corresponding scale as a list.
1101
+ adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1102
+ The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
1103
+ residual in the original unet. If multiple adapters are specified in init, you can set the
1104
+ corresponding scale as a list.
1105
+ adapter_conditioning_factor (`float`, *optional*, defaults to 1.0):
1106
+ The fraction of timesteps for which adapter should be applied. If `adapter_conditioning_factor` is
1107
+ `0.0`, adapter is not applied at all. If `adapter_conditioning_factor` is `1.0`, adapter is applied for
1108
+ all timesteps. If `adapter_conditioning_factor` is `0.5`, adapter is applied for half of the timesteps.
1109
+ clip_skip (`int`, *optional*):
1110
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
1111
+ the output of the pre-final layer will be used for computing the prompt embeddings.
1112
+
1113
+ Examples:
1114
+
1115
+ Returns:
1116
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
1117
+ [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
1118
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1119
+ """
1120
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1121
+ adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
1122
+
1123
+ # 0. Default height and width to unet
1124
+
1125
+ height, width = self._default_height_width(height, width, adapter_image)
1126
+ device = self._execution_device
1127
+
1128
+ if isinstance(adapter, MultiAdapter):
1129
+ adapter_input = []
1130
+
1131
+ for one_image in adapter_image:
1132
+ one_image = _preprocess_adapter_image(one_image, height, width)
1133
+ one_image = one_image.to(device=device, dtype=adapter.dtype)
1134
+ adapter_input.append(one_image)
1135
+ else:
1136
+ adapter_input = _preprocess_adapter_image(adapter_image, height, width)
1137
+ adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
1138
+ original_size = original_size or (height, width)
1139
+ target_size = target_size or (height, width)
1140
+
1141
+ # 0.1 align format for control guidance
1142
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1143
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1144
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1145
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1146
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1147
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1148
+ control_guidance_start, control_guidance_end = (
1149
+ mult * [control_guidance_start],
1150
+ mult * [control_guidance_end],
1151
+ )
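+ # After this normalization, `control_guidance_start` and `control_guidance_end` are lists with one entry per
+ # ControlNet, which is what the per-step `controlnet_keep` computation below expects.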
1152
+
1153
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1154
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1155
+ if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
1156
+ adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.adapters)
1157
+
1158
+ # 1. Check inputs. Raise error if not correct
1159
+ self.check_inputs(
1160
+ prompt,
1161
+ prompt_2,
1162
+ height,
1163
+ width,
1164
+ callback_steps,
1165
+ negative_prompt=negative_prompt,
1166
+ negative_prompt_2=negative_prompt_2,
1167
+ prompt_embeds=prompt_embeds,
1168
+ negative_prompt_embeds=negative_prompt_embeds,
1169
+ pooled_prompt_embeds=pooled_prompt_embeds,
1170
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1171
+ )
1172
+
1173
+ self.check_conditions(
1174
+ prompt,
1175
+ prompt_embeds,
1176
+ adapter_image,
1177
+ control_image,
1178
+ adapter_conditioning_scale,
1179
+ controlnet_conditioning_scale,
1180
+ control_guidance_start,
1181
+ control_guidance_end,
1182
+ )
1183
+
1184
+ # 2. Define call parameters
1185
+ if prompt is not None and isinstance(prompt, str):
1186
+ batch_size = 1
1187
+ elif prompt is not None and isinstance(prompt, list):
1188
+ batch_size = len(prompt)
1189
+ else:
1190
+ batch_size = prompt_embeds.shape[0]
1191
+
1192
+ device = self._execution_device
1193
+
1194
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1195
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1196
+ # corresponds to doing no classifier free guidance.
1197
+ do_classifier_free_guidance = guidance_scale > 1.0
1198
+
1199
+ # 3. Encode input prompt
1200
+ (
1201
+ prompt_embeds,
1202
+ negative_prompt_embeds,
1203
+ pooled_prompt_embeds,
1204
+ negative_pooled_prompt_embeds,
1205
+ ) = self.encode_prompt(
1206
+ prompt=prompt,
1207
+ prompt_2=prompt_2,
1208
+ device=device,
1209
+ num_images_per_prompt=num_images_per_prompt,
1210
+ do_classifier_free_guidance=do_classifier_free_guidance,
1211
+ negative_prompt=negative_prompt,
1212
+ negative_prompt_2=negative_prompt_2,
1213
+ prompt_embeds=prompt_embeds,
1214
+ negative_prompt_embeds=negative_prompt_embeds,
1215
+ pooled_prompt_embeds=pooled_prompt_embeds,
1216
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1217
+ clip_skip=clip_skip,
1218
+ )
1219
+
1220
+ # 4. Prepare timesteps
1221
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1222
+
1223
+ timesteps = self.scheduler.timesteps
1224
+
1225
+ # 5. Prepare latent variables
1226
+ num_channels_latents = self.unet.config.in_channels
1227
+ latents = self.prepare_latents(
1228
+ batch_size * num_images_per_prompt,
1229
+ num_channels_latents,
1230
+ height,
1231
+ width,
1232
+ prompt_embeds.dtype,
1233
+ device,
1234
+ generator,
1235
+ latents,
1236
+ )
1237
+
1238
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1239
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1240
+
1241
+ # 7. Prepare added time ids & embeddings & adapter features
1242
+ if isinstance(adapter, MultiAdapter):
1243
+ adapter_state = adapter(adapter_input, adapter_conditioning_scale)
1244
+ for k, v in enumerate(adapter_state):
1245
+ adapter_state[k] = v
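+ # `MultiAdapter.forward` already applies the per-adapter conditioning scales internally, so this
+ # loop is effectively a no-op kept for symmetry with the single-adapter branch below.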
1246
+ else:
1247
+ adapter_state = adapter(adapter_input)
1248
+ for k, v in enumerate(adapter_state):
1249
+ adapter_state[k] = v * adapter_conditioning_scale
1250
+ if num_images_per_prompt > 1:
1251
+ for k, v in enumerate(adapter_state):
1252
+ adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
1253
+ if do_classifier_free_guidance:
1254
+ for k, v in enumerate(adapter_state):
1255
+ adapter_state[k] = torch.cat([v] * 2, dim=0)
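+ # At this point `adapter_state` holds one T2I-Adapter feature map per UNet resolution, already scaled by
+ # `adapter_conditioning_scale`, repeated for `num_images_per_prompt` when needed, and duplicated along the
+ # batch dimension whenever classifier-free guidance doubles the latent batch.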
1256
+
1257
+ # 7.2 Prepare control images
1258
+ if isinstance(controlnet, ControlNetModel):
1259
+ control_image = self.prepare_control_image(
1260
+ image=control_image,
1261
+ width=width,
1262
+ height=height,
1263
+ batch_size=batch_size * num_images_per_prompt,
1264
+ num_images_per_prompt=num_images_per_prompt,
1265
+ device=device,
1266
+ dtype=controlnet.dtype,
1267
+ do_classifier_free_guidance=do_classifier_free_guidance,
1268
+ guess_mode=guess_mode,
1269
+ )
1270
+ elif isinstance(controlnet, MultiControlNetModel):
1271
+ control_images = []
1272
+
1273
+ for control_image_ in control_image:
1274
+ control_image_ = self.prepare_control_image(
1275
+ image=control_image_,
1276
+ width=width,
1277
+ height=height,
1278
+ batch_size=batch_size * num_images_per_prompt,
1279
+ num_images_per_prompt=num_images_per_prompt,
1280
+ device=device,
1281
+ dtype=controlnet.dtype,
1282
+ do_classifier_free_guidance=do_classifier_free_guidance,
1283
+ guess_mode=guess_mode,
1284
+ )
1285
+
1286
+ control_images.append(control_image_)
1287
+
1288
+ control_image = control_images
1289
+ else:
1290
+ raise ValueError(f"{controlnet.__class__} is not supported.")
1291
+
1292
+ # 8.2 Create tensor stating which controlnets to keep
1293
+ controlnet_keep = []
1294
+ for i in range(len(timesteps)):
1295
+ keeps = [
1296
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1297
+ for s, e in zip(control_guidance_start, control_guidance_end)
1298
+ ]
1299
+ if isinstance(self.controlnet, MultiControlNetModel):
1300
+ controlnet_keep.append(keeps)
1301
+ else:
1302
+ controlnet_keep.append(keeps[0])
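+ # `controlnet_keep[i]` is 1.0 while step `i` falls inside the [control_guidance_start, control_guidance_end]
+ # window and 0.0 otherwise (one flag per ControlNet when a MultiControlNetModel is used).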
1303
+
1304
+ add_text_embeds = pooled_prompt_embeds
1305
+ if self.text_encoder_2 is None:
1306
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1307
+ else:
1308
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1309
+
1310
+ add_time_ids = self._get_add_time_ids(
1311
+ original_size,
1312
+ crops_coords_top_left,
1313
+ target_size,
1314
+ dtype=prompt_embeds.dtype,
1315
+ text_encoder_projection_dim=text_encoder_projection_dim,
1316
+ )
1317
+ if negative_original_size is not None and negative_target_size is not None:
1318
+ negative_add_time_ids = self._get_add_time_ids(
1319
+ negative_original_size,
1320
+ negative_crops_coords_top_left,
1321
+ negative_target_size,
1322
+ dtype=prompt_embeds.dtype,
1323
+ text_encoder_projection_dim=text_encoder_projection_dim,
1324
+ )
1325
+ else:
1326
+ negative_add_time_ids = add_time_ids
1327
+
1328
+ if do_classifier_free_guidance:
1329
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1330
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1331
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
1332
+
1333
+ prompt_embeds = prompt_embeds.to(device)
1334
+ add_text_embeds = add_text_embeds.to(device)
1335
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
1336
+
1337
+ # 8. Denoising loop
1338
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1339
+
1340
+ # 7.1 Apply denoising_end
1341
+ if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
1342
+ discrete_timestep_cutoff = int(
1343
+ round(
1344
+ self.scheduler.config.num_train_timesteps
1345
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
1346
+ )
1347
+ )
1348
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1349
+ timesteps = timesteps[:num_inference_steps]
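+ # For example, with 50 inference steps and `denoising_end=0.8`, roughly the first 40 timesteps are kept and
+ # the remaining noise is expected to be removed by a follow-up (refiner) pipeline.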
1350
+
1351
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1352
+ for i, t in enumerate(timesteps):
1353
+ # expand the latents if we are doing classifier free guidance
1354
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1355
+
1356
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1357
+
1358
+ # predict the noise residual
1359
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
1360
+
1361
+ if i < int(num_inference_steps * adapter_conditioning_factor):
1362
+ down_intrablock_additional_residuals = [state.clone() for state in adapter_state]
1363
+ else:
1364
+ down_intrablock_additional_residuals = None
1365
+
1366
+ # ----------- ControlNet
1367
+
1368
+ # expand the latents if we are doing classifier free guidance
1369
+ latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1370
+
1371
+ # concat latents, mask, masked_image_latents in the channel dimension
1372
+ latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
1373
+
1374
+ # controlnet(s) inference
1375
+ if guess_mode and do_classifier_free_guidance:
1376
+ # Infer ControlNet only for the conditional batch.
1377
+ control_model_input = latents
1378
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1379
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1380
+ controlnet_added_cond_kwargs = {
1381
+ "text_embeds": add_text_embeds.chunk(2)[1],
1382
+ "time_ids": add_time_ids.chunk(2)[1],
1383
+ }
1384
+ else:
1385
+ control_model_input = latent_model_input_controlnet
1386
+ controlnet_prompt_embeds = prompt_embeds
1387
+ controlnet_added_cond_kwargs = added_cond_kwargs
1388
+
1389
+ if isinstance(controlnet_keep[i], list):
1390
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1391
+ else:
1392
+ controlnet_cond_scale = controlnet_conditioning_scale
1393
+ if isinstance(controlnet_cond_scale, list):
1394
+ controlnet_cond_scale = controlnet_cond_scale[0]
1395
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
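+ # Outside the configured guidance window `controlnet_keep[i]` is 0.0, so `cond_scale` becomes 0 and the
+ # ControlNet residuals contribute nothing to this denoising step.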
1396
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1397
+ control_model_input,
1398
+ t,
1399
+ encoder_hidden_states=controlnet_prompt_embeds,
1400
+ controlnet_cond=control_image,
1401
+ conditioning_scale=cond_scale,
1402
+ guess_mode=guess_mode,
1403
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1404
+ return_dict=False,
1405
+ )
1406
+
1407
+ noise_pred = self.unet(
1408
+ latent_model_input,
1409
+ t,
1410
+ encoder_hidden_states=prompt_embeds,
1411
+ cross_attention_kwargs=cross_attention_kwargs,
1412
+ added_cond_kwargs=added_cond_kwargs,
1413
+ return_dict=False,
1414
+ down_intrablock_additional_residuals=down_intrablock_additional_residuals, # t2iadapter
1415
+ down_block_additional_residuals=down_block_res_samples, # controlnet
1416
+ mid_block_additional_residual=mid_block_res_sample, # controlnet
1417
+ )[0]
1418
+
1419
+ # perform guidance
1420
+ if do_classifier_free_guidance:
1421
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1422
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1423
+
1424
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1425
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1426
+ noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
1427
+
1428
+ # compute the previous noisy sample x_t -> x_t-1
1429
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1430
+
1431
+ # call the callback, if provided
1432
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1433
+ progress_bar.update()
1434
+ if callback is not None and i % callback_steps == 0:
1435
+ step_idx = i // getattr(self.scheduler, "order", 1)
1436
+ callback(step_idx, t, latents)
1437
+
1438
+ if not output_type == "latent":
1439
+ # make sure the VAE is in float32 mode, as it overflows in float16
1440
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1441
+
1442
+ if needs_upcasting:
1443
+ self.upcast_vae()
1444
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1445
+
1446
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1447
+
1448
+ # cast back to fp16 if needed
1449
+ if needs_upcasting:
1450
+ self.vae.to(dtype=torch.float16)
1451
+ else:
1452
+ image = latents
1453
+ return StableDiffusionXLPipelineOutput(images=image)
1454
+
1455
+ image = self.image_processor.postprocess(image, output_type=output_type)
1456
+
1457
+ # Offload all models
1458
+ self.maybe_free_model_hooks()
1459
+
1460
+ if not return_dict:
1461
+ return (image,)
1462
+
1463
+ return StableDiffusionXLPipelineOutput(images=image)
v0.26.3/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py ADDED
@@ -0,0 +1,1908 @@
1
+ # Copyright 2023 Jake Babbidge, TencentARC and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ignore the entire file for precommit
16
+ # type: ignore
17
+
18
+ import inspect
19
+ from collections.abc import Callable
20
+ from typing import Any, List, Optional, Union
21
+
22
+ import numpy as np
23
+ import PIL
24
+ import torch
25
+ import torch.nn.functional as F
26
+ from transformers import (
27
+ CLIPTextModel,
28
+ CLIPTextModelWithProjection,
29
+ CLIPTokenizer,
30
+ )
31
+
32
+ from diffusers import DiffusionPipeline
33
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
34
+ from diffusers.loaders import (
35
+ FromSingleFileMixin,
36
+ LoraLoaderMixin,
37
+ StableDiffusionXLLoraLoaderMixin,
38
+ TextualInversionLoaderMixin,
39
+ )
40
+ from diffusers.models import (
41
+ AutoencoderKL,
42
+ ControlNetModel,
43
+ MultiAdapter,
44
+ T2IAdapter,
45
+ UNet2DConditionModel,
46
+ )
47
+ from diffusers.models.attention_processor import (
48
+ AttnProcessor2_0,
49
+ LoRAAttnProcessor2_0,
50
+ LoRAXFormersAttnProcessor,
51
+ XFormersAttnProcessor,
52
+ )
53
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
54
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
55
+ from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput
56
+ from diffusers.schedulers import KarrasDiffusionSchedulers
57
+ from diffusers.utils import (
58
+ PIL_INTERPOLATION,
59
+ USE_PEFT_BACKEND,
60
+ logging,
61
+ replace_example_docstring,
62
+ scale_lora_layers,
63
+ unscale_lora_layers,
64
+ )
65
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
66
+
67
+
68
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
69
+
70
+ EXAMPLE_DOC_STRING = """
71
+ Examples:
72
+ ```py
73
+ >>> import torch
74
+ >>> from diffusers import DiffusionPipeline, T2IAdapter
75
+ >>> from diffusers.utils import load_image
76
+ >>> from PIL import Image
77
+ >>> from controlnet_aux.midas import MidasDetector
78
+
79
+ >>> adapter = T2IAdapter.from_pretrained(
80
+ ... "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
81
+ ... ).to("cuda")
82
+
83
+ >>> controlnet = ControlNetModel.from_pretrained(
84
+ ... "diffusers/controlnet-depth-sdxl-1.0",
85
+ ... torch_dtype=torch.float16,
86
+ ... variant="fp16",
87
+ ... use_safetensors=True
88
+ ... ).to("cuda")
89
+
90
+ >>> pipe = DiffusionPipeline.from_pretrained(
91
+ ... "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
92
+ ... torch_dtype=torch.float16,
93
+ ... variant="fp16",
94
+ ... use_safetensors=True,
95
+ ... custom_pipeline="stable_diffusion_xl_adapter_controlnet_inpaint",
96
+ ... adapter=adapter,
97
+ ... controlnet=controlnet,
98
+ ... ).to("cuda")
99
+
100
+ >>> prompt = "a tiger sitting on a park bench"
101
+ >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
102
+ >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
103
+
104
+ >>> image = load_image(img_url).resize((1024, 1024))
105
+ >>> mask_image = load_image(mask_url).resize((1024, 1024))
106
+
107
+ >>> midas_depth = MidasDetector.from_pretrained(
108
+ ... "valhalla/t2iadapter-aux-models", filename="dpt_large_384.pt", model_type="dpt_large"
109
+ ... ).to("cuda")
110
+
111
+ >>> depth_image = midas_depth(
112
+ ... image, detect_resolution=512, image_resolution=1024
113
+ ... )
114
+
115
+ >>> strength = 0.4
116
+
117
+ >>> generator = torch.manual_seed(42)
118
+
119
+ >>> result_image = pipe(
120
+ ... image=image,
121
+ ... mask_image=mask,
122
+ ... adapter_image=depth_image,
123
+ ... control_image=depth_image,
124
+ ... controlnet_conditioning_scale=strength,
125
+ ... adapter_conditioning_scale=strength,
126
+ ... strength=0.7,
127
+ ... generator=generator,
128
+ ... prompt=prompt,
129
+ ... negative_prompt="extra digit, fewer digits, cropped, worst quality, low quality",
130
+ ... num_inference_steps=50
131
+ ... ).images[0]
132
+ ```
133
+ """
134
+
135
+
136
+ def _preprocess_adapter_image(image, height, width):
137
+ if isinstance(image, torch.Tensor):
138
+ return image
139
+ elif isinstance(image, PIL.Image.Image):
140
+ image = [image]
141
+
142
+ if isinstance(image[0], PIL.Image.Image):
143
+ image = [np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"])) for i in image]
144
+ image = [
145
+ i[None, ..., None] if i.ndim == 2 else i[None, ...] for i in image
146
+ ] # expand [h, w] or [h, w, c] to [b, h, w, c]
147
+ image = np.concatenate(image, axis=0)
148
+ image = np.array(image).astype(np.float32) / 255.0
149
+ image = image.transpose(0, 3, 1, 2)
150
+ image = torch.from_numpy(image)
151
+ elif isinstance(image[0], torch.Tensor):
152
+ if image[0].ndim == 3:
153
+ image = torch.stack(image, dim=0)
154
+ elif image[0].ndim == 4:
155
+ image = torch.cat(image, dim=0)
156
+ else:
157
+ raise ValueError(
158
+ f"Invalid image tensor! Expecting image tensor with 3 or 4 dimension, but recive: {image[0].ndim}"
159
+ )
160
+ return image
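+ # The returned tensor is NCHW; PIL/numpy inputs end up as float32 values in [0, 1], while tensor inputs are
+ # stacked/concatenated and otherwise passed through unchanged.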
161
+
162
+
163
+ def mask_pil_to_torch(mask, height, width):
164
+ # preprocess mask
165
+ if isinstance(mask, Union[PIL.Image.Image, np.ndarray]):
166
+ mask = [mask]
167
+
168
+ if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
169
+ mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
170
+ mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
171
+ mask = mask.astype(np.float32) / 255.0
172
+ elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
173
+ mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
174
+
175
+ mask = torch.from_numpy(mask)
176
+ return mask
177
+
178
+
179
+ def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool = False):
180
+ """
181
+ Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
182
+ converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
183
+ ``image`` and ``1`` for the ``mask``.
184
+
185
+ The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
186
+ binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
187
+
188
+ Args:
189
+ image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
190
+ It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
191
+ ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
192
+ mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
193
+ It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
194
+ ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
195
+
196
+
197
+ Raises:
198
+ ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
199
+ should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
200
+ TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
201
+ (ot the other way around).
202
+
203
+ Returns:
204
+ tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
205
+ dimensions: ``batch x channels x height x width``.
206
+ """
207
+
208
+ # checkpoint. TOD(Yiyi) - need to clean this up later
209
+ if image is None:
210
+ raise ValueError("`image` input cannot be undefined.")
211
+
212
+ if mask is None:
213
+ raise ValueError("`mask_image` input cannot be undefined.")
214
+
215
+ if isinstance(image, torch.Tensor):
216
+ if not isinstance(mask, torch.Tensor):
217
+ mask = mask_pil_to_torch(mask, height, width)
218
+
219
+ if image.ndim == 3:
220
+ image = image.unsqueeze(0)
221
+
222
+ # Batch and add channel dim for single mask
223
+ if mask.ndim == 2:
224
+ mask = mask.unsqueeze(0).unsqueeze(0)
225
+
226
+ # Batch single mask or add channel dim
227
+ if mask.ndim == 3:
228
+ # Single batched mask, no channel dim or single mask not batched but channel dim
229
+ if mask.shape[0] == 1:
230
+ mask = mask.unsqueeze(0)
231
+
232
+ # Batched masks no channel dim
233
+ else:
234
+ mask = mask.unsqueeze(1)
235
+
236
+ assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
237
+ # assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
238
+ assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
239
+
240
+ # Check image is in [-1, 1]
241
+ # if image.min() < -1 or image.max() > 1:
242
+ # raise ValueError("Image should be in [-1, 1] range")
243
+
244
+ # Check mask is in [0, 1]
245
+ if mask.min() < 0 or mask.max() > 1:
246
+ raise ValueError("Mask should be in [0, 1] range")
247
+
248
+ # Binarize mask
249
+ mask[mask < 0.5] = 0
250
+ mask[mask >= 0.5] = 1
251
+
252
+ # Image as float32
253
+ image = image.to(dtype=torch.float32)
254
+ elif isinstance(mask, torch.Tensor):
255
+ raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
256
+ else:
257
+ # preprocess image
258
+ if isinstance(image, Union[PIL.Image.Image, np.ndarray]):
259
+ image = [image]
260
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
261
+ # resize all images w.r.t passed height an width
262
+ image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
263
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
264
+ image = np.concatenate(image, axis=0)
265
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
266
+ image = np.concatenate([i[None, :] for i in image], axis=0)
267
+
268
+ image = image.transpose(0, 3, 1, 2)
269
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
270
+
271
+ mask = mask_pil_to_torch(mask, height, width)
272
+ mask[mask < 0.5] = 0
273
+ mask[mask >= 0.5] = 1
274
+
275
+ if image.shape[1] == 4:
276
+ # images are in latent space and thus can't
277
+ # be masked set masked_image to None
278
+ # we assume that the checkpoint is not an inpainting
279
+ # checkpoint. TOD(Yiyi) - need to clean this up later
280
+ masked_image = None
281
+ else:
282
+ masked_image = image * (mask < 0.5)
283
+
284
+ # n.b. ensure backwards compatibility as old function does not return image
285
+ if return_image:
286
+ return mask, masked_image, image
287
+
288
+ return mask, masked_image
289
+
290
+
291
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
292
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
293
+ """
294
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
295
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
296
+ """
297
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
298
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
299
+ # rescale the results from guidance (fixes overexposure)
300
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
301
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
302
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
303
+ return noise_cfg
304
+
305
+
306
+ class StableDiffusionXLControlNetAdapterInpaintPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
307
+ r"""
308
+ Pipeline for text-to-image generation using Stable Diffusion augmented with T2I-Adapter
309
+ https://arxiv.org/abs/2302.08453
310
+
311
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
312
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
313
+
314
+ Args:
315
+ adapter ([`T2IAdapter`] or [`MultiAdapter`] or `List[T2IAdapter]`):
316
+ Provides additional conditioning to the unet during the denoising process. If you set multiple Adapter as a
317
+ list, the outputs from each Adapter are added together to create one combined additional conditioning.
318
+ adapter_weights (`List[float]`, *optional*, defaults to None):
319
+ List of floats representing the weight which will be multiply to each adapter's output before adding them
320
+ together.
321
+ vae ([`AutoencoderKL`]):
322
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
323
+ text_encoder ([`CLIPTextModel`]):
324
+ Frozen text-encoder. Stable Diffusion uses the text portion of
325
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
326
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
327
+ tokenizer (`CLIPTokenizer`):
328
+ Tokenizer of class
329
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
330
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
331
+ scheduler ([`SchedulerMixin`]):
332
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
333
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
334
+ safety_checker ([`StableDiffusionSafetyChecker`]):
335
+ Classification module that estimates whether generated images could be considered offensive or harmful.
336
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
337
+ feature_extractor ([`CLIPFeatureExtractor`]):
338
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
339
+ requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):
340
+ Whether the `unet` requires a aesthetic_score condition to be passed during inference. Also see the config
341
+ of `stabilityai/stable-diffusion-xl-refiner-1-0`.
342
+ force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
343
+ Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
344
+ `stabilityai/stable-diffusion-xl-base-1-0`.
345
+ """
346
+
347
+ def __init__(
348
+ self,
349
+ vae: AutoencoderKL,
350
+ text_encoder: CLIPTextModel,
351
+ text_encoder_2: CLIPTextModelWithProjection,
352
+ tokenizer: CLIPTokenizer,
353
+ tokenizer_2: CLIPTokenizer,
354
+ unet: UNet2DConditionModel,
355
+ adapter: Union[T2IAdapter, MultiAdapter],
356
+ controlnet: Union[ControlNetModel, MultiControlNetModel],
357
+ scheduler: KarrasDiffusionSchedulers,
358
+ requires_aesthetics_score: bool = False,
359
+ force_zeros_for_empty_prompt: bool = True,
360
+ ):
361
+ super().__init__()
362
+
363
+ if isinstance(controlnet, (list, tuple)):
364
+ controlnet = MultiControlNetModel(controlnet)
365
+
366
+ self.register_modules(
367
+ vae=vae,
368
+ text_encoder=text_encoder,
369
+ text_encoder_2=text_encoder_2,
370
+ tokenizer=tokenizer,
371
+ tokenizer_2=tokenizer_2,
372
+ unet=unet,
373
+ adapter=adapter,
374
+ controlnet=controlnet,
375
+ scheduler=scheduler,
376
+ )
377
+ self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
378
+ self.register_to_config(requires_aesthetics_score=requires_aesthetics_score)
379
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
380
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
381
+ self.control_image_processor = VaeImageProcessor(
382
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
383
+ )
384
+ self.default_sample_size = self.unet.config.sample_size
385
+
386
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
387
+ def enable_vae_slicing(self):
388
+ r"""
389
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
390
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
391
+ """
392
+ self.vae.enable_slicing()
393
+
394
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
395
+ def disable_vae_slicing(self):
396
+ r"""
397
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
398
+ computing decoding in one step.
399
+ """
400
+ self.vae.disable_slicing()
401
+
402
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
403
+ def enable_vae_tiling(self):
404
+ r"""
405
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
406
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
407
+ processing larger images.
408
+ """
409
+ self.vae.enable_tiling()
410
+
411
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
412
+ def disable_vae_tiling(self):
413
+ r"""
414
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
415
+ computing decoding in one step.
416
+ """
417
+ self.vae.disable_tiling()
418
+
419
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt
420
+ def encode_prompt(
421
+ self,
422
+ prompt: str,
423
+ prompt_2: Optional[str] = None,
424
+ device: Optional[torch.device] = None,
425
+ num_images_per_prompt: int = 1,
426
+ do_classifier_free_guidance: bool = True,
427
+ negative_prompt: Optional[str] = None,
428
+ negative_prompt_2: Optional[str] = None,
429
+ prompt_embeds: Optional[torch.FloatTensor] = None,
430
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
431
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
432
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
433
+ lora_scale: Optional[float] = None,
434
+ clip_skip: Optional[int] = None,
435
+ ):
436
+ r"""
437
+ Encodes the prompt into text encoder hidden states.
438
+
439
+ Args:
440
+ prompt (`str` or `List[str]`, *optional*):
441
+ prompt to be encoded
442
+ prompt_2 (`str` or `List[str]`, *optional*):
443
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
444
+ used in both text-encoders
445
+ device: (`torch.device`):
446
+ torch device
447
+ num_images_per_prompt (`int`):
448
+ number of images that should be generated per prompt
449
+ do_classifier_free_guidance (`bool`):
450
+ whether to use classifier free guidance or not
451
+ negative_prompt (`str` or `List[str]`, *optional*):
452
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
453
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
454
+ less than `1`).
455
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
456
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
457
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
458
+ prompt_embeds (`torch.FloatTensor`, *optional*):
459
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
460
+ provided, text embeddings will be generated from `prompt` input argument.
461
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
462
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
463
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
464
+ argument.
465
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
466
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
467
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
468
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
469
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
470
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
471
+ input argument.
472
+ lora_scale (`float`, *optional*):
473
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
474
+ clip_skip (`int`, *optional*):
475
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
476
+ the output of the pre-final layer will be used for computing the prompt embeddings.
477
+ """
478
+ device = device or self._execution_device
479
+
480
+ # set lora scale so that monkey patched LoRA
481
+ # function of text encoder can correctly access it
482
+ if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
483
+ self._lora_scale = lora_scale
484
+
485
+ # dynamically adjust the LoRA scale
486
+ if self.text_encoder is not None:
487
+ if not USE_PEFT_BACKEND:
488
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
489
+ else:
490
+ scale_lora_layers(self.text_encoder, lora_scale)
491
+
492
+ if self.text_encoder_2 is not None:
493
+ if not USE_PEFT_BACKEND:
494
+ adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
495
+ else:
496
+ scale_lora_layers(self.text_encoder_2, lora_scale)
497
+
498
+ prompt = [prompt] if isinstance(prompt, str) else prompt
499
+
500
+ if prompt is not None:
501
+ batch_size = len(prompt)
502
+ else:
503
+ batch_size = prompt_embeds.shape[0]
504
+
505
+ # Define tokenizers and text encoders
506
+ tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
507
+ text_encoders = (
508
+ [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
509
+ )
510
+
511
+ if prompt_embeds is None:
512
+ prompt_2 = prompt_2 or prompt
513
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
514
+
515
+ # textual inversion: procecss multi-vector tokens if necessary
516
+ prompt_embeds_list = []
517
+ prompts = [prompt, prompt_2]
518
+ for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
519
+ if isinstance(self, TextualInversionLoaderMixin):
520
+ prompt = self.maybe_convert_prompt(prompt, tokenizer)
521
+
522
+ text_inputs = tokenizer(
523
+ prompt,
524
+ padding="max_length",
525
+ max_length=tokenizer.model_max_length,
526
+ truncation=True,
527
+ return_tensors="pt",
528
+ )
529
+
530
+ text_input_ids = text_inputs.input_ids
531
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
532
+
533
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
534
+ text_input_ids, untruncated_ids
535
+ ):
536
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
537
+ logger.warning(
538
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
539
+ f" {tokenizer.model_max_length} tokens: {removed_text}"
540
+ )
541
+
542
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
543
+
544
+ # We are only ALWAYS interested in the pooled output of the final text encoder
545
+ pooled_prompt_embeds = prompt_embeds[0]
546
+ if clip_skip is None:
547
+ prompt_embeds = prompt_embeds.hidden_states[-2]
548
+ else:
549
+ # "2" because SDXL always indexes from the penultimate layer.
550
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
551
+
552
+ prompt_embeds_list.append(prompt_embeds)
553
+
554
+ prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
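+ # SDXL concatenates the (penultimate-layer) hidden states of both text encoders along the feature dimension;
+ # for the base checkpoints this yields 768 + 1280 = 2048-dimensional token embeddings.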
555
+
556
+ # get unconditional embeddings for classifier free guidance
557
+ zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
558
+ if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
559
+ negative_prompt_embeds = torch.zeros_like(prompt_embeds)
560
+ negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
561
+ elif do_classifier_free_guidance and negative_prompt_embeds is None:
562
+ negative_prompt = negative_prompt or ""
563
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
564
+
565
+ # normalize str to list
566
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
567
+ negative_prompt_2 = (
568
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
569
+ )
570
+
571
+ uncond_tokens: List[str]
572
+ if prompt is not None and type(prompt) is not type(negative_prompt):
573
+ raise TypeError(
574
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
575
+ f" {type(prompt)}."
576
+ )
577
+ elif batch_size != len(negative_prompt):
578
+ raise ValueError(
579
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
580
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
581
+ " the batch size of `prompt`."
582
+ )
583
+ else:
584
+ uncond_tokens = [negative_prompt, negative_prompt_2]
585
+
586
+ negative_prompt_embeds_list = []
587
+ for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
588
+ if isinstance(self, TextualInversionLoaderMixin):
589
+ negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
590
+
591
+ max_length = prompt_embeds.shape[1]
592
+ uncond_input = tokenizer(
593
+ negative_prompt,
594
+ padding="max_length",
595
+ max_length=max_length,
596
+ truncation=True,
597
+ return_tensors="pt",
598
+ )
599
+
600
+ negative_prompt_embeds = text_encoder(
601
+ uncond_input.input_ids.to(device),
602
+ output_hidden_states=True,
603
+ )
604
+ # We are always interested only in the pooled output of the final text encoder
605
+ negative_pooled_prompt_embeds = negative_prompt_embeds[0]
606
+ negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]
607
+
608
+ negative_prompt_embeds_list.append(negative_prompt_embeds)
609
+
610
+ negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)
611
+
612
+ if self.text_encoder_2 is not None:
613
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
614
+ else:
615
+ prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)
616
+
617
+ bs_embed, seq_len, _ = prompt_embeds.shape
618
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
619
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
620
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
621
+
622
+ if do_classifier_free_guidance:
623
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
624
+ seq_len = negative_prompt_embeds.shape[1]
625
+
626
+ if self.text_encoder_2 is not None:
627
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
628
+ else:
629
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)
630
+
631
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
632
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
633
+
634
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
635
+ bs_embed * num_images_per_prompt, -1
636
+ )
637
+ if do_classifier_free_guidance:
638
+ negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
639
+ bs_embed * num_images_per_prompt, -1
640
+ )
641
+
642
+ if self.text_encoder is not None:
643
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
644
+ # Retrieve the original scale by scaling back the LoRA layers
645
+ unscale_lora_layers(self.text_encoder, lora_scale)
646
+
647
+ if self.text_encoder_2 is not None:
648
+ if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
649
+ # Retrieve the original scale by scaling back the LoRA layers
650
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
651
+
652
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
653
+
654
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
655
+ def prepare_extra_step_kwargs(self, generator, eta):
656
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
657
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
658
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
659
+ # and should be between [0, 1]
660
+
661
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
662
+ extra_step_kwargs = {}
663
+ if accepts_eta:
664
+ extra_step_kwargs["eta"] = eta
665
+
666
+ # check if the scheduler accepts generator
667
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
668
+ if accepts_generator:
669
+ extra_step_kwargs["generator"] = generator
670
+ return extra_step_kwargs
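+ # Illustrative example (an assumption about common schedulers, not from the
+ # upstream file): DDIMScheduler.step accepts both `eta` and `generator`, so
+ # `extra_step_kwargs == {"eta": eta, "generator": generator}`, whereas
+ # EulerDiscreteScheduler.step has no `eta` parameter and only `generator`
+ # would be forwarded.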
671
+
672
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
673
+ def check_image(self, image, prompt, prompt_embeds):
674
+ image_is_pil = isinstance(image, PIL.Image.Image)
675
+ image_is_tensor = isinstance(image, torch.Tensor)
676
+ image_is_np = isinstance(image, np.ndarray)
677
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
678
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
679
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
680
+
681
+ if (
682
+ not image_is_pil
683
+ and not image_is_tensor
684
+ and not image_is_np
685
+ and not image_is_pil_list
686
+ and not image_is_tensor_list
687
+ and not image_is_np_list
688
+ ):
689
+ raise TypeError(
690
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
691
+ )
692
+
693
+ if image_is_pil:
694
+ image_batch_size = 1
695
+ else:
696
+ image_batch_size = len(image)
697
+
698
+ if prompt is not None and isinstance(prompt, str):
699
+ prompt_batch_size = 1
700
+ elif prompt is not None and isinstance(prompt, list):
701
+ prompt_batch_size = len(prompt)
702
+ elif prompt_embeds is not None:
703
+ prompt_batch_size = prompt_embeds.shape[0]
704
+
705
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
706
+ raise ValueError(
707
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
708
+ )
709
+
710
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.check_inputs
711
+ def check_inputs(
712
+ self,
713
+ prompt,
714
+ prompt_2,
715
+ height,
716
+ width,
717
+ callback_steps,
718
+ negative_prompt=None,
719
+ negative_prompt_2=None,
720
+ prompt_embeds=None,
721
+ negative_prompt_embeds=None,
722
+ pooled_prompt_embeds=None,
723
+ negative_pooled_prompt_embeds=None,
724
+ callback_on_step_end_tensor_inputs=None,
725
+ ):
726
+ if height % 8 != 0 or width % 8 != 0:
727
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
728
+
729
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
730
+ raise ValueError(
731
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
732
+ f" {type(callback_steps)}."
733
+ )
734
+
735
+ if callback_on_step_end_tensor_inputs is not None and not all(
736
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
737
+ ):
738
+ raise ValueError(
739
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
740
+ )
741
+
742
+ if prompt is not None and prompt_embeds is not None:
743
+ raise ValueError(
744
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
745
+ " only forward one of the two."
746
+ )
747
+ elif prompt_2 is not None and prompt_embeds is not None:
748
+ raise ValueError(
749
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
750
+ " only forward one of the two."
751
+ )
752
+ elif prompt is None and prompt_embeds is None:
753
+ raise ValueError(
754
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
755
+ )
756
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
757
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
758
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
759
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
760
+
761
+ if negative_prompt is not None and negative_prompt_embeds is not None:
762
+ raise ValueError(
763
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
764
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
765
+ )
766
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
767
+ raise ValueError(
768
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
769
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
770
+ )
771
+
772
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
773
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
774
+ raise ValueError(
775
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
776
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
777
+ f" {negative_prompt_embeds.shape}."
778
+ )
779
+
780
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
781
+ raise ValueError(
782
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
783
+ )
784
+
785
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
786
+ raise ValueError(
787
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
788
+ )
789
+
790
+ def check_conditions(
791
+ self,
792
+ prompt,
793
+ prompt_embeds,
794
+ adapter_image,
795
+ control_image,
796
+ adapter_conditioning_scale,
797
+ controlnet_conditioning_scale,
798
+ control_guidance_start,
799
+ control_guidance_end,
800
+ ):
801
+ # controlnet checks
802
+ if not isinstance(control_guidance_start, (tuple, list)):
803
+ control_guidance_start = [control_guidance_start]
804
+
805
+ if not isinstance(control_guidance_end, (tuple, list)):
806
+ control_guidance_end = [control_guidance_end]
807
+
808
+ if len(control_guidance_start) != len(control_guidance_end):
809
+ raise ValueError(
810
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
811
+ )
812
+
813
+ if isinstance(self.controlnet, MultiControlNetModel):
814
+ if len(control_guidance_start) != len(self.controlnet.nets):
815
+ raise ValueError(
816
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
817
+ )
818
+
819
+ for start, end in zip(control_guidance_start, control_guidance_end):
820
+ if start >= end:
821
+ raise ValueError(
822
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
823
+ )
824
+ if start < 0.0:
825
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
826
+ if end > 1.0:
827
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
828
+
829
+ # Check controlnet `image`
830
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
831
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
832
+ )
833
+ if (
834
+ isinstance(self.controlnet, ControlNetModel)
835
+ or is_compiled
836
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
837
+ ):
838
+ self.check_image(control_image, prompt, prompt_embeds)
839
+ elif (
840
+ isinstance(self.controlnet, MultiControlNetModel)
841
+ or is_compiled
842
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
843
+ ):
844
+ if not isinstance(control_image, list):
845
+ raise TypeError("For multiple controlnets: `control_image` must be type `list`")
846
+
847
+ # When `image` is a nested list:
848
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
849
+ elif any(isinstance(i, list) for i in control_image):
850
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
851
+ elif len(control_image) != len(self.controlnet.nets):
852
+ raise ValueError(
853
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(control_image)} images and {len(self.controlnet.nets)} ControlNets."
854
+ )
855
+
856
+ for image_ in control_image:
857
+ self.check_image(image_, prompt, prompt_embeds)
858
+ else:
859
+ assert False
860
+
861
+ # Check `controlnet_conditioning_scale`
862
+ if (
863
+ isinstance(self.controlnet, ControlNetModel)
864
+ or is_compiled
865
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
866
+ ):
867
+ if not isinstance(controlnet_conditioning_scale, float):
868
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
869
+ elif (
870
+ isinstance(self.controlnet, MultiControlNetModel)
871
+ or is_compiled
872
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
873
+ ):
874
+ if isinstance(controlnet_conditioning_scale, list):
875
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
876
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
877
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
878
+ self.controlnet.nets
879
+ ):
880
+ raise ValueError(
881
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
882
+ " the same length as the number of controlnets"
883
+ )
884
+ else:
885
+ assert False
886
+
887
+ # adapter checks
888
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
889
+ self.check_image(adapter_image, prompt, prompt_embeds)
890
+ elif (
891
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
892
+ ):
893
+ if not isinstance(adapter_image, list):
894
+ raise TypeError("For multiple adapters: `adapter_image` must be type `list`")
895
+
896
+ # When `image` is a nested list:
897
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
898
+ elif any(isinstance(i, list) for i in adapter_image):
899
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
900
+ elif len(adapter_image) != len(self.adapter.adapters):
901
+ raise ValueError(
902
+ f"For multiple adapters: `image` must have the same length as the number of adapters, but got {len(adapter_image)} images and {len(self.adapter.adapters)} Adapters."
903
+ )
904
+
905
+ for image_ in adapter_image:
906
+ self.check_image(image_, prompt, prompt_embeds)
907
+ else:
908
+ assert False
909
+
910
+ # Check `adapter_conditioning_scale`
911
+ if isinstance(self.adapter, T2IAdapter) or is_compiled and isinstance(self.adapter._orig_mod, T2IAdapter):
912
+ if not isinstance(adapter_conditioning_scale, float):
913
+ raise TypeError("For single adapter: `adapter_conditioning_scale` must be type `float`.")
914
+ elif (
915
+ isinstance(self.adapter, MultiAdapter) or is_compiled and isinstance(self.adapter._orig_mod, MultiAdapter)
916
+ ):
917
+ if isinstance(adapter_conditioning_scale, list):
918
+ if any(isinstance(i, list) for i in adapter_conditioning_scale):
919
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
920
+ elif isinstance(adapter_conditioning_scale, list) and len(adapter_conditioning_scale) != len(
921
+ self.adapter.adapters
922
+ ):
923
+ raise ValueError(
924
+ "For multiple adapters: When `adapter_conditioning_scale` is specified as `list`, it must have"
925
+ " the same length as the number of adapters"
926
+ )
927
+ else:
928
+ assert False
929
+
930
+ def prepare_latents(
931
+ self,
932
+ batch_size,
933
+ num_channels_latents,
934
+ height,
935
+ width,
936
+ dtype,
937
+ device,
938
+ generator,
939
+ latents=None,
940
+ image=None,
941
+ timestep=None,
942
+ is_strength_max=True,
943
+ add_noise=True,
944
+ return_noise=False,
945
+ return_image_latents=False,
946
+ ):
947
+ shape = (
948
+ batch_size,
949
+ num_channels_latents,
950
+ height // self.vae_scale_factor,
951
+ width // self.vae_scale_factor,
952
+ )
953
+ if isinstance(generator, list) and len(generator) != batch_size:
954
+ raise ValueError(
955
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
956
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
957
+ )
958
+
959
+ if (image is None or timestep is None) and not is_strength_max:
960
+ raise ValueError(
961
+ "Since strength < 1, initial latents are to be initialised as a combination of Image + Noise. "
962
+ "However, either the image or the noise timestep has not been provided."
963
+ )
964
+
965
+ if image.shape[1] == 4:
966
+ image_latents = image.to(device=device, dtype=dtype)
967
+ elif return_image_latents or (latents is None and not is_strength_max):
968
+ image = image.to(device=device, dtype=dtype)
969
+ image_latents = self._encode_vae_image(image=image, generator=generator)
970
+
971
+ image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1)
972
+
973
+ if latents is None and add_noise:
974
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
975
+ # if strength is 1. then initialise the latents to noise, else initial to image + noise
976
+ latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
977
+ # if pure noise then scale the initial latents by the Scheduler's init sigma
978
+ latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
979
+ elif add_noise:
980
+ noise = latents.to(device)
981
+ latents = noise * self.scheduler.init_noise_sigma
982
+ else:
983
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
984
+ latents = image_latents.to(device)
985
+
986
+ outputs = (latents,)
987
+
988
+ if return_noise:
989
+ outputs += (noise,)
990
+
991
+ if return_image_latents:
992
+ outputs += (image_latents,)
993
+
994
+ return outputs
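+ # Sketch of the branching above (illustrative): with strength == 1.0
+ # (`is_strength_max=True`) the latents start as pure noise scaled by
+ # `scheduler.init_noise_sigma`; with e.g. strength == 0.5 they start as
+ # `scheduler.add_noise(image_latents, noise, timestep)`, i.e. the encoded
+ # image noised to the corresponding point of the schedule.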
995
+
996
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
997
+ dtype = image.dtype
998
+ if self.vae.config.force_upcast:
999
+ image = image.float()
1000
+ self.vae.to(dtype=torch.float32)
1001
+
1002
+ if isinstance(generator, list):
1003
+ image_latents = [
1004
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
1005
+ for i in range(image.shape[0])
1006
+ ]
1007
+ image_latents = torch.cat(image_latents, dim=0)
1008
+ else:
1009
+ image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
1010
+
1011
+ if self.vae.config.force_upcast:
1012
+ self.vae.to(dtype)
1013
+
1014
+ image_latents = image_latents.to(dtype)
1015
+ image_latents = self.vae.config.scaling_factor * image_latents
1016
+
1017
+ return image_latents
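+ # Note (assumption about the default SDXL VAE): `vae.config.scaling_factor`
+ # is 0.13025, so a (B, 3, 1024, 1024) image is encoded to (B, 4, 128, 128)
+ # latents and multiplied by that constant; the float32 round-trip above guards
+ # against fp16 overflow in the VAE encoder.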
1018
+
1019
+ def prepare_mask_latents(
1020
+ self,
1021
+ mask,
1022
+ masked_image,
1023
+ batch_size,
1024
+ height,
1025
+ width,
1026
+ dtype,
1027
+ device,
1028
+ generator,
1029
+ do_classifier_free_guidance,
1030
+ ):
1031
+ # resize the mask to latents shape as we concatenate the mask to the latents
1032
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
1033
+ # and half precision
1034
+ mask = torch.nn.functional.interpolate(
1035
+ mask,
1036
+ size=(
1037
+ height // self.vae_scale_factor,
1038
+ width // self.vae_scale_factor,
1039
+ ),
1040
+ )
1041
+ mask = mask.to(device=device, dtype=dtype)
1042
+
1043
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
1044
+ if mask.shape[0] < batch_size:
1045
+ if not batch_size % mask.shape[0] == 0:
1046
+ raise ValueError(
1047
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
1048
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
1049
+ " of masks that you pass is divisible by the total requested batch size."
1050
+ )
1051
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
1052
+
1053
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
1054
+
1055
+ masked_image_latents = None
1056
+ if masked_image is not None:
1057
+ masked_image = masked_image.to(device=device, dtype=dtype)
1058
+ masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
1059
+ if masked_image_latents.shape[0] < batch_size:
1060
+ if not batch_size % masked_image_latents.shape[0] == 0:
1061
+ raise ValueError(
1062
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
1063
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
1064
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
1065
+ )
1066
+ masked_image_latents = masked_image_latents.repeat(
1067
+ batch_size // masked_image_latents.shape[0], 1, 1, 1
1068
+ )
1069
+
1070
+ masked_image_latents = (
1071
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
1072
+ )
1073
+
1074
+ # aligning device to prevent device errors when concatenating it with the latent model input
1075
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
1076
+
1077
+ return mask, masked_image_latents
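+ # Shape sketch (illustrative): with a vae_scale_factor of 8, a
+ # (B, 1, 1024, 1024) mask is interpolated to (B, 1, 128, 128) so it can be
+ # concatenated with the latents, and both the mask and `masked_image_latents`
+ # are doubled along dim 0 when classifier-free guidance is enabled.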
1078
+
1079
+ # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
1080
+ def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
1081
+ # get the original timestep using init_timestep
1082
+ if denoising_start is None:
1083
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
1084
+ t_start = max(num_inference_steps - init_timestep, 0)
1085
+ else:
1086
+ t_start = 0
1087
+
1088
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
1089
+
1090
+ # Strength is irrelevant if we directly request a timestep to start at;
1091
+ # that is, strength is determined by the denoising_start instead.
1092
+ if denoising_start is not None:
1093
+ discrete_timestep_cutoff = int(
1094
+ round(
1095
+ self.scheduler.config.num_train_timesteps
1096
+ - (denoising_start * self.scheduler.config.num_train_timesteps)
1097
+ )
1098
+ )
1099
+
1100
+ num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
1101
+ if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
1102
+ # if the scheduler is a 2nd order scheduler we might have to do +1
1103
+ # because `num_inference_steps` might be even given that every timestep
1104
+ # (except the highest one) is duplicated. If `num_inference_steps` is even it would
1105
+ # mean that we cut the timesteps in the middle of the denoising step
1106
+ # (between 1st and 2nd devirative) which leads to incorrect results. By adding 1
1107
+ # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
1108
+ num_inference_steps = num_inference_steps + 1
1109
+
1110
+ # because t_n+1 >= t_n, we slice the timesteps starting from the end
1111
+ timesteps = timesteps[-num_inference_steps:]
1112
+ return timesteps, num_inference_steps
1113
+
1114
+ return timesteps, num_inference_steps - t_start
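+ # Worked example (illustrative): with num_inference_steps=50, strength=0.6 and
+ # denoising_start=None, init_timestep = 30 and t_start = 20, so only the last
+ # 30 scheduler timesteps are used. With denoising_start=0.4 the strength is
+ # ignored and only timesteps below round(0.6 * num_train_timesteps) are kept
+ # (i.e. below 600 for the usual 1000-step training schedule).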
1115
+
1116
+ def _get_add_time_ids(
1117
+ self,
1118
+ original_size,
1119
+ crops_coords_top_left,
1120
+ target_size,
1121
+ aesthetic_score,
1122
+ negative_aesthetic_score,
1123
+ dtype,
1124
+ text_encoder_projection_dim=None,
1125
+ ):
1126
+ if self.config.requires_aesthetics_score:
1127
+ add_time_ids = list(original_size + crops_coords_top_left + (aesthetic_score,))
1128
+ add_neg_time_ids = list(original_size + crops_coords_top_left + (negative_aesthetic_score,))
1129
+ else:
1130
+ add_time_ids = list(original_size + crops_coords_top_left + target_size)
1131
+ add_neg_time_ids = list(original_size + crops_coords_top_left + target_size)
1132
+
1133
+ passed_add_embed_dim = (
1134
+ self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
1135
+ )
1136
+ expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features
1137
+
1138
+ if (
1139
+ expected_add_embed_dim > passed_add_embed_dim
1140
+ and (expected_add_embed_dim - passed_add_embed_dim) == self.unet.config.addition_time_embed_dim
1141
+ ):
1142
+ raise ValueError(
1143
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to enable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=True)` to make sure `aesthetic_score` {aesthetic_score} and `negative_aesthetic_score` {negative_aesthetic_score} is correctly used by the model."
1144
+ )
1145
+ elif (
1146
+ expected_add_embed_dim < passed_add_embed_dim
1147
+ and (passed_add_embed_dim - expected_add_embed_dim) == self.unet.config.addition_time_embed_dim
1148
+ ):
1149
+ raise ValueError(
1150
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. Please make sure to disable `requires_aesthetics_score` with `pipe.register_to_config(requires_aesthetics_score=False)` to make sure `target_size` {target_size} is correctly used by the model."
1151
+ )
1152
+ elif expected_add_embed_dim != passed_add_embed_dim:
1153
+ raise ValueError(
1154
+ f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
1155
+ )
1156
+
1157
+ add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
1158
+ add_neg_time_ids = torch.tensor([add_neg_time_ids], dtype=dtype)
1159
+
1160
+ return add_time_ids, add_neg_time_ids
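+ # Example of the produced micro-conditioning (illustrative, assuming the
+ # standard SDXL UNet with addition_time_embed_dim=256 and a 1280-dim pooled
+ # text projection): add_time_ids is a 6-vector such as
+ # (1024, 1024, 0, 0, 1024, 1024), giving 6 * 256 + 1280 = 2816, which is the
+ # expected `unet.add_embedding.linear_1.in_features` for the base checkpoints.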
1161
+
1162
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
1163
+ def upcast_vae(self):
1164
+ dtype = self.vae.dtype
1165
+ self.vae.to(dtype=torch.float32)
1166
+ use_torch_2_0_or_xformers = isinstance(
1167
+ self.vae.decoder.mid_block.attentions[0].processor,
1168
+ (
1169
+ AttnProcessor2_0,
1170
+ XFormersAttnProcessor,
1171
+ LoRAXFormersAttnProcessor,
1172
+ LoRAAttnProcessor2_0,
1173
+ ),
1174
+ )
1175
+ # if xformers or torch_2_0 is used attention block does not need
1176
+ # to be in float32 which can save lots of memory
1177
+ if use_torch_2_0_or_xformers:
1178
+ self.vae.post_quant_conv.to(dtype)
1179
+ self.vae.decoder.conv_in.to(dtype)
1180
+ self.vae.decoder.mid_block.to(dtype)
1181
+
1182
+ # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
1183
+ def _default_height_width(self, height, width, image):
1184
+ # NOTE: It is possible that a list of images have different
1185
+ # dimensions for each image, so just checking the first image
1186
+ # is not _exactly_ correct, but it is simple.
1187
+ while isinstance(image, list):
1188
+ image = image[0]
1189
+
1190
+ if height is None:
1191
+ if isinstance(image, PIL.Image.Image):
1192
+ height = image.height
1193
+ elif isinstance(image, torch.Tensor):
1194
+ height = image.shape[-2]
1195
+
1196
+ # round down to nearest multiple of `self.adapter.downscale_factor`
1197
+ height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
1198
+
1199
+ if width is None:
1200
+ if isinstance(image, PIL.Image.Image):
1201
+ width = image.width
1202
+ elif isinstance(image, torch.Tensor):
1203
+ width = image.shape[-1]
1204
+
1205
+ # round down to nearest multiple of `self.adapter.downscale_factor`
1206
+ width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
1207
+
1208
+ return height, width
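+ # Rounding example (illustrative, assuming adapter.downscale_factor == 16):
+ # a reference image of height 1023 and width 771 yields
+ # height = (1023 // 16) * 16 = 1008 and width = (771 // 16) * 16 = 768.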
1209
+
1210
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_freeu
1211
+ def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
1212
+ r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.
1213
+
1214
+ The suffixes after the scaling factors represent the stages where they are being applied.
1215
+
1216
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
1217
+ that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
1218
+
1219
+ Args:
1220
+ s1 (`float`):
1221
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
1222
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1223
+ s2 (`float`):
1224
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
1225
+ mitigate "oversmoothing effect" in the enhanced denoising process.
1226
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
1227
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
1228
+ """
1229
+ if not hasattr(self, "unet"):
1230
+ raise ValueError("The pipeline must have `unet` for using FreeU.")
1231
+ self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)
1232
+
1233
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_freeu
1234
+ def disable_freeu(self):
1235
+ """Disables the FreeU mechanism if enabled."""
1236
+ self.unet.disable_freeu()
1237
+
1238
+ def prepare_control_image(
1239
+ self,
1240
+ image,
1241
+ width,
1242
+ height,
1243
+ batch_size,
1244
+ num_images_per_prompt,
1245
+ device,
1246
+ dtype,
1247
+ do_classifier_free_guidance=False,
1248
+ guess_mode=False,
1249
+ ):
1250
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
1251
+ image_batch_size = image.shape[0]
1252
+
1253
+ if image_batch_size == 1:
1254
+ repeat_by = batch_size
1255
+ else:
1256
+ # image batch size is the same as prompt batch size
1257
+ repeat_by = num_images_per_prompt
1258
+
1259
+ image = image.repeat_interleave(repeat_by, dim=0)
1260
+
1261
+ image = image.to(device=device, dtype=dtype)
1262
+
1263
+ if do_classifier_free_guidance and not guess_mode:
1264
+ image = torch.cat([image] * 2)
1265
+
1266
+ return image
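+ # Shape sketch (illustrative): a single control image preprocessed to
+ # (1, 3, H, W) is repeated along dim 0 to the requested batch (the caller
+ # passes batch_size * num_images_per_prompt) and then doubled when
+ # classifier-free guidance is on and guess_mode is off.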
1267
+
1268
+ @torch.no_grad()
1269
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
1270
+ def __call__(
1271
+ self,
1272
+ prompt: Optional[Union[str, List[str]]] = None,
1273
+ prompt_2: Optional[Union[str, List[str]]] = None,
1274
+ image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
1275
+ mask_image: Optional[Union[torch.Tensor, PIL.Image.Image]] = None,
1276
+ adapter_image: PipelineImageInput = None,
1277
+ control_image: PipelineImageInput = None,
1278
+ height: Optional[int] = None,
1279
+ width: Optional[int] = None,
1280
+ strength: float = 0.9999,
1281
+ num_inference_steps: int = 50,
1282
+ denoising_start: Optional[float] = None,
1283
+ denoising_end: Optional[float] = None,
1284
+ guidance_scale: float = 5.0,
1285
+ negative_prompt: Optional[Union[str, List[str]]] = None,
1286
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
1287
+ num_images_per_prompt: Optional[int] = 1,
1288
+ eta: float = 0.0,
1289
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
1290
+ latents: Optional[torch.FloatTensor] = None,
1291
+ prompt_embeds: Optional[torch.FloatTensor] = None,
1292
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
1293
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1294
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
1295
+ output_type: Optional[str] = "pil",
1296
+ return_dict: bool = True,
1297
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
1298
+ callback_steps: int = 1,
1299
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1300
+ guidance_rescale: float = 0.0,
1301
+ original_size: Optional[Tuple[int, int]] = None,
1302
+ crops_coords_top_left: Optional[Tuple[int, int]] = (0, 0),
1303
+ target_size: Optional[Tuple[int, int]] = None,
1304
+ adapter_conditioning_scale: Optional[Union[float, List[float]]] = 1.0,
1305
+ cond_tau: float = 1.0,
1306
+ aesthetic_score: float = 6.0,
1307
+ negative_aesthetic_score: float = 2.5,
1308
+ controlnet_conditioning_scale=1.0,
1309
+ guess_mode: bool = False,
1310
+ control_guidance_start=0.0,
1311
+ control_guidance_end=1.0,
1312
+ ):
1313
+ r"""
1314
+ Function invoked when calling the pipeline for generation.
1315
+
1316
+ Args:
1317
+ prompt (`str` or `List[str]`, *optional*):
1318
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
1319
+ instead.
1320
+ prompt_2 (`str` or `List[str]`, *optional*):
1321
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
1322
+ used in both text-encoders
1323
+ image (`PIL.Image.Image`):
1324
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
1325
+ be masked out with `mask_image` and repainted according to `prompt`.
1326
+ mask_image (`PIL.Image.Image`):
1327
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
1328
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
1329
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
1330
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
1331
+ adapter_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]` or `List[List[PIL.Image.Image]]`):
1332
+ The Adapter input condition. Adapter uses this input condition to generate guidance to Unet. If the
1333
+ type is specified as `torch.FloatTensor`, it is passed to the Adapter as is. `PIL.Image.Image` can also be
1334
+ accepted as an image. The control image is automatically resized to fit the output image.
1335
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
1336
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
1337
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
1338
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
1339
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
1340
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
1341
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
1342
+ input to a single ControlNet.
1343
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1344
+ The height in pixels of the generated image.
1345
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
1346
+ The width in pixels of the generated image.
1347
+ strength (`float`, *optional*, defaults to 0.9999):
1348
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
1349
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
1350
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
1351
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
1352
+ essentially ignores `image`.
1353
+ num_inference_steps (`int`, *optional*, defaults to 50):
1354
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1355
+ expense of slower inference.
1356
+ denoising_start (`float`, *optional*):
1357
+ When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be
1358
+ bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and
1359
+ it is assumed that the passed `image` is a partly denoised image. Note that when this is specified,
1360
+ strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline
1361
+ is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refining the Image
1362
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output).
1363
+ denoising_end (`float`, *optional*):
1364
+ When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
1365
+ completed before it is intentionally prematurely terminated. As a result, the returned sample will
1366
+ still retain a substantial amount of noise as determined by the discrete timesteps selected by the
1367
+ scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
1368
+ "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
1369
+ Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
1370
+ guidance_scale (`float`, *optional*, defaults to 5.0):
1371
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
1372
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
1373
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1374
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
1375
+ usually at the expense of lower image quality.
1376
+ negative_prompt (`str` or `List[str]`, *optional*):
1377
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1378
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1379
+ less than `1`).
1380
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1381
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1382
+ `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
1383
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1384
+ The number of images to generate per prompt.
1385
+ eta (`float`, *optional*, defaults to 0.0):
1386
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
1387
+ [`schedulers.DDIMScheduler`], will be ignored for others.
1388
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1389
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1390
+ to make generation deterministic.
1391
+ latents (`torch.FloatTensor`, *optional*):
1392
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1393
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1394
+ tensor will be generated by sampling using the supplied random `generator`.
1395
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1396
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1397
+ provided, text embeddings will be generated from `prompt` input argument.
1398
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1399
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1400
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1401
+ argument.
1402
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1403
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1404
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1405
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1406
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1407
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1408
+ input argument.
1409
+ output_type (`str`, *optional*, defaults to `"pil"`):
1410
+ The output format of the generated image. Choose between
1411
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1412
+ return_dict (`bool`, *optional*, defaults to `True`):
1413
+ Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionAdapterPipelineOutput`]
1414
+ instead of a plain tuple.
1415
+ callback (`Callable`, *optional*):
1416
+ A function that will be called every `callback_steps` steps during inference. The function will be
1417
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
1418
+ callback_steps (`int`, *optional*, defaults to 1):
1419
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
1420
+ called at every step.
1421
+ cross_attention_kwargs (`dict`, *optional*):
1422
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1423
+ `self.processor` in
1424
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1425
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
1426
+ Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
1427
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16 of
1428
+ [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
1429
+ Guidance rescale factor should fix overexposure when using zero terminal SNR.
1430
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1431
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
1432
+ `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as
1433
+ explained in section 2.2 of
1434
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1435
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
1436
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
1437
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
1438
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
1439
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1440
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
1441
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
1442
+ not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
1443
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1444
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1445
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added to the
1446
+ residual in the original unet. If multiple ControlNets are specified in init, you can set the
1447
+ corresponding scale as a list.
1448
+ adapter_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
1449
+ The outputs of the adapter are multiplied by `adapter_conditioning_scale` before they are added to the
1450
+ residual in the original unet. If multiple adapters are specified in init, you can set the
1451
+ corresponding scale as a list.
1452
+ aesthetic_score (`float`, *optional*, defaults to 6.0):
1453
+ Used to simulate an aesthetic score of the generated image by influencing the positive text condition.
1454
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1455
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
1456
+ negative_aesthetic_score (`float`, *optional*, defaults to 2.5):
1457
+ Part of SDXL's micro-conditioning as explained in section 2.2 of
1458
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). Can be used to
1459
+ simulate an aesthetic score of the generated image by influencing the negative text condition.
1460
+ Examples:
1461
+
1462
+ Returns:
1463
+ [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] or `tuple`:
1464
+ [`~pipelines.stable_diffusion.StableDiffusionAdapterPipelineOutput`] if `return_dict` is True, otherwise a
1465
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1466
+ """
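+ # Hedged usage sketch (an assumption about how this community pipeline is
+ # typically loaded, not the official example injected via EXAMPLE_DOC_STRING):
+ #
+ #   pipe = DiffusionPipeline.from_pretrained(
+ #       "stabilityai/stable-diffusion-xl-base-1.0",
+ #       custom_pipeline="pipeline_stable_diffusion_xl_controlnet_adapter_inpaint",
+ #       controlnet=controlnet, adapter=adapter, torch_dtype=torch.float16,
+ #   ).to("cuda")
+ #   image = pipe(prompt="a photo of a cat", image=init_image, mask_image=mask,
+ #                adapter_image=sketch, control_image=depth_map).images[0]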
1467
+ # 0. Default height and width to unet
1468
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
1469
+ adapter = self.adapter._orig_mod if is_compiled_module(self.adapter) else self.adapter
1470
+ height, width = self._default_height_width(height, width, adapter_image)
1471
+ device = self._execution_device
1472
+
1473
+ if isinstance(adapter, MultiAdapter):
1474
+ adapter_input = []
1475
+ for one_image in adapter_image:
1476
+ one_image = _preprocess_adapter_image(one_image, height, width)
1477
+ one_image = one_image.to(device=device, dtype=adapter.dtype)
1478
+ adapter_input.append(one_image)
1479
+ else:
1480
+ adapter_input = _preprocess_adapter_image(adapter_image, height, width)
1481
+ adapter_input = adapter_input.to(device=device, dtype=adapter.dtype)
1482
+
1483
+ original_size = original_size or (height, width)
1484
+ target_size = target_size or (height, width)
1485
+
1486
+ # 0.1 align format for control guidance
1487
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
1488
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
1489
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
1490
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
1491
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
1492
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
1493
+ control_guidance_start, control_guidance_end = (
1494
+ mult * [control_guidance_start],
1495
+ mult * [control_guidance_end],
1496
+ )
1497
+
1498
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
1499
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
1500
+ if isinstance(adapter, MultiAdapter) and isinstance(adapter_conditioning_scale, float):
1501
+ adapter_conditioning_scale = [adapter_conditioning_scale] * len(adapter.adapters)
1502
+
1503
+ # 1. Check inputs. Raise error if not correct
1504
+ self.check_inputs(
1505
+ prompt,
1506
+ prompt_2,
1507
+ height,
1508
+ width,
1509
+ callback_steps,
1510
+ negative_prompt=negative_prompt,
1511
+ negative_prompt_2=negative_prompt_2,
1512
+ prompt_embeds=prompt_embeds,
1513
+ negative_prompt_embeds=negative_prompt_embeds,
1514
+ pooled_prompt_embeds=pooled_prompt_embeds,
1515
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1516
+ )
1517
+
1518
+ self.check_conditions(
1519
+ prompt,
1520
+ prompt_embeds,
1521
+ adapter_image,
1522
+ control_image,
1523
+ adapter_conditioning_scale,
1524
+ controlnet_conditioning_scale,
1525
+ control_guidance_start,
1526
+ control_guidance_end,
1527
+ )
1528
+
1529
+ # 2. Define call parameters
1530
+ if prompt is not None and isinstance(prompt, str):
1531
+ batch_size = 1
1532
+ elif prompt is not None and isinstance(prompt, list):
1533
+ batch_size = len(prompt)
1534
+ else:
1535
+ batch_size = prompt_embeds.shape[0]
1536
+
1537
+ device = self._execution_device
1538
+
1539
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
1540
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
1541
+ # corresponds to doing no classifier free guidance.
1542
+ do_classifier_free_guidance = guidance_scale > 1.0
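+ # Illustrative reminder (not part of the upstream file): later in the loop the
+ # two noise predictions are combined as
+ #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
+ # which is why guidance_scale <= 1.0 is treated as "no classifier-free guidance".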
1543
+
1544
+ # 3. Encode input prompt
1545
+ (
1546
+ prompt_embeds,
1547
+ negative_prompt_embeds,
1548
+ pooled_prompt_embeds,
1549
+ negative_pooled_prompt_embeds,
1550
+ ) = self.encode_prompt(
1551
+ prompt=prompt,
1552
+ prompt_2=prompt_2,
1553
+ device=device,
1554
+ num_images_per_prompt=num_images_per_prompt,
1555
+ do_classifier_free_guidance=do_classifier_free_guidance,
1556
+ negative_prompt=negative_prompt,
1557
+ negative_prompt_2=negative_prompt_2,
1558
+ prompt_embeds=prompt_embeds,
1559
+ negative_prompt_embeds=negative_prompt_embeds,
1560
+ pooled_prompt_embeds=pooled_prompt_embeds,
1561
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1562
+ )
1563
+
1564
+ # 4. set timesteps
1565
+ def denoising_value_valid(dnv):
1566
+ return isinstance(dnv, float) and 0 < dnv < 1
1567
+
1568
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1569
+ timesteps, num_inference_steps = self.get_timesteps(
1570
+ num_inference_steps,
1571
+ strength,
1572
+ device,
1573
+ denoising_start=denoising_start if denoising_value_valid(denoising_start) else None,
1574
+ )
1575
+ # check that number of inference steps is not < 1 - as this doesn't make sense
1576
+ if num_inference_steps < 1:
1577
+ raise ValueError(
1578
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline "
1579
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1580
+ )
1581
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1582
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1583
+ # create a boolean to check if the strength is set to 1; if so, initialise the latents with pure noise
1584
+ is_strength_max = strength == 1.0
1585
+
1586
+ # 5. Preprocess mask and image - resizes image and mask w.r.t height and width
1587
+ mask, masked_image, init_image = prepare_mask_and_masked_image(
1588
+ image, mask_image, height, width, return_image=True
1589
+ )
1590
+
1591
+ # 6. Prepare latent variables
1592
+ num_channels_latents = self.vae.config.latent_channels
1593
+ num_channels_unet = self.unet.config.in_channels
1594
+ return_image_latents = num_channels_unet == 4
1595
+
1596
+ add_noise = denoising_start is None
1597
+ latents_outputs = self.prepare_latents(
1598
+ batch_size * num_images_per_prompt,
1599
+ num_channels_latents,
1600
+ height,
1601
+ width,
1602
+ prompt_embeds.dtype,
1603
+ device,
1604
+ generator,
1605
+ latents,
1606
+ image=init_image,
1607
+ timestep=latent_timestep,
1608
+ is_strength_max=is_strength_max,
1609
+ add_noise=add_noise,
1610
+ return_noise=True,
1611
+ return_image_latents=return_image_latents,
1612
+ )
1613
+
1614
+ if return_image_latents:
1615
+ latents, noise, image_latents = latents_outputs
1616
+ else:
1617
+ latents, noise = latents_outputs
1618
+
1619
+ # 7. Prepare mask latent variables
1620
+ mask, masked_image_latents = self.prepare_mask_latents(
1621
+ mask,
1622
+ masked_image,
1623
+ batch_size * num_images_per_prompt,
1624
+ height,
1625
+ width,
1626
+ prompt_embeds.dtype,
1627
+ device,
1628
+ generator,
1629
+ do_classifier_free_guidance,
1630
+ )
1631
+
1632
+ # 8. Check that sizes of mask, masked image and latents match
1633
+ if num_channels_unet == 9:
1634
+ # default case for runwayml/stable-diffusion-inpainting
1635
+ num_channels_mask = mask.shape[1]
1636
+ num_channels_masked_image = masked_image_latents.shape[1]
1637
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1638
+ raise ValueError(
1639
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1640
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1641
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1642
+ f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
1643
+ " `pipeline.unet` or your `mask_image` or `image` input."
1644
+ )
1645
+ elif num_channels_unet != 4:
1646
+ raise ValueError(
1647
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1648
+ )
1649
+
1650
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1651
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1652
+
1653
+ # 10. Prepare added time ids & embeddings & adapter features
1654
+ if isinstance(adapter, MultiAdapter):
1655
+ adapter_state = adapter(adapter_input, adapter_conditioning_scale)
1656
+ for k, v in enumerate(adapter_state):
1657
+ adapter_state[k] = v
1658
+ else:
1659
+ adapter_state = adapter(adapter_input)
1660
+ for k, v in enumerate(adapter_state):
1661
+ adapter_state[k] = v * adapter_conditioning_scale
1662
+ if num_images_per_prompt > 1:
1663
+ for k, v in enumerate(adapter_state):
1664
+ adapter_state[k] = v.repeat(num_images_per_prompt, 1, 1, 1)
1665
+ if do_classifier_free_guidance:
1666
+ for k, v in enumerate(adapter_state):
1667
+ adapter_state[k] = torch.cat([v] * 2, dim=0)
1668
+
1669
+ # 10.2 Prepare control images
1670
+ if isinstance(controlnet, ControlNetModel):
1671
+ control_image = self.prepare_control_image(
1672
+ image=control_image,
1673
+ width=width,
1674
+ height=height,
1675
+ batch_size=batch_size * num_images_per_prompt,
1676
+ num_images_per_prompt=num_images_per_prompt,
1677
+ device=device,
1678
+ dtype=controlnet.dtype,
1679
+ do_classifier_free_guidance=do_classifier_free_guidance,
1680
+ guess_mode=guess_mode,
1681
+ )
1682
+ elif isinstance(controlnet, MultiControlNetModel):
1683
+ control_images = []
1684
+
1685
+ for control_image_ in control_image:
1686
+ control_image_ = self.prepare_control_image(
1687
+ image=control_image_,
1688
+ width=width,
1689
+ height=height,
1690
+ batch_size=batch_size * num_images_per_prompt,
1691
+ num_images_per_prompt=num_images_per_prompt,
1692
+ device=device,
1693
+ dtype=controlnet.dtype,
1694
+ do_classifier_free_guidance=do_classifier_free_guidance,
1695
+ guess_mode=guess_mode,
1696
+ )
1697
+
1698
+ control_images.append(control_image_)
1699
+
1700
+ control_image = control_images
1701
+ else:
1702
+ raise ValueError(f"{controlnet.__class__} is not supported.")
1703
+
1704
+ # 8.2 Create tensor stating which controlnets to keep
1705
+ controlnet_keep = []
1706
+ for i in range(len(timesteps)):
1707
+ keeps = [
1708
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1709
+ for s, e in zip(control_guidance_start, control_guidance_end)
1710
+ ]
1711
+ if isinstance(self.controlnet, MultiControlNetModel):
1712
+ controlnet_keep.append(keeps)
1713
+ else:
1714
+ controlnet_keep.append(keeps[0])
1715
+ # ----------------------------------------------------------------
1716
+
1717
+ add_text_embeds = pooled_prompt_embeds
1718
+ if self.text_encoder_2 is None:
1719
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
1720
+ else:
1721
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
1722
+
1723
+ add_time_ids, add_neg_time_ids = self._get_add_time_ids(
1724
+ original_size,
1725
+ crops_coords_top_left,
1726
+ target_size,
1727
+ aesthetic_score,
1728
+ negative_aesthetic_score,
1729
+ dtype=prompt_embeds.dtype,
1730
+ text_encoder_projection_dim=text_encoder_projection_dim,
1731
+ )
1732
+ add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1733
+
1734
+ if do_classifier_free_guidance:
1735
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1736
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
1737
+ add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)
1738
+ add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)
1739
+
1740
+ prompt_embeds = prompt_embeds.to(device)
1741
+ add_text_embeds = add_text_embeds.to(device)
1742
+ add_time_ids = add_time_ids.to(device)
1743
+
1744
+ # 11. Denoising loop
1745
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1746
+
1747
+ # 11.1 Apply denoising_end
1748
+ if (
1749
+ denoising_end is not None
1750
+ and denoising_start is not None
1751
+ and denoising_value_valid(denoising_end)
1752
+ and denoising_value_valid(denoising_start)
1753
+ and denoising_start >= denoising_end
1754
+ ):
1755
+ raise ValueError(
1756
+ f"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: "
1757
+ + f" {denoising_end} when using type float."
1758
+ )
1759
+ elif denoising_end is not None and denoising_value_valid(denoising_end):
1760
+ discrete_timestep_cutoff = int(
1761
+ round(
1762
+ self.scheduler.config.num_train_timesteps
1763
+ - (denoising_end * self.scheduler.config.num_train_timesteps)
1764
+ )
1765
+ )
1766
+ num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
1767
+ timesteps = timesteps[:num_inference_steps]
1768
+
1769
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1770
+ for i, t in enumerate(timesteps):
1771
+ # expand the latents if we are doing classifier free guidance
1772
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1773
+
1774
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1775
+
1776
+ if num_channels_unet == 9:
1777
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1778
+
1779
+ # predict the noise residual
1780
+ added_cond_kwargs = {
1781
+ "text_embeds": add_text_embeds,
1782
+ "time_ids": add_time_ids,
1783
+ }
1784
+
1785
+ if i < int(num_inference_steps * cond_tau):
1786
+ down_block_additional_residuals = [state.clone() for state in adapter_state]
1787
+ else:
1788
+ down_block_additional_residuals = None
1789
+
1790
+ # ----------- ControlNet
1791
+
1792
+ # expand the latents if we are doing classifier free guidance
1793
+ latent_model_input_controlnet = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1794
+
1795
+ # concat latents, mask, masked_image_latents in the channel dimension
1796
+ latent_model_input_controlnet = self.scheduler.scale_model_input(latent_model_input_controlnet, t)
1797
+
1798
+ # controlnet(s) inference
1799
+ if guess_mode and do_classifier_free_guidance:
1800
+ # Infer ControlNet only for the conditional batch.
1801
+ control_model_input = latents
1802
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1803
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1804
+ controlnet_added_cond_kwargs = {
1805
+ "text_embeds": add_text_embeds.chunk(2)[1],
1806
+ "time_ids": add_time_ids.chunk(2)[1],
1807
+ }
1808
+ else:
1809
+ control_model_input = latent_model_input_controlnet
1810
+ controlnet_prompt_embeds = prompt_embeds
1811
+ controlnet_added_cond_kwargs = added_cond_kwargs
1812
+
1813
+ if isinstance(controlnet_keep[i], list):
1814
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1815
+ else:
1816
+ controlnet_cond_scale = controlnet_conditioning_scale
1817
+ if isinstance(controlnet_cond_scale, list):
1818
+ controlnet_cond_scale = controlnet_cond_scale[0]
1819
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1820
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1821
+ control_model_input,
1822
+ t,
1823
+ encoder_hidden_states=controlnet_prompt_embeds,
1824
+ controlnet_cond=control_image,
1825
+ conditioning_scale=cond_scale,
1826
+ guess_mode=guess_mode,
1827
+ added_cond_kwargs=controlnet_added_cond_kwargs,
1828
+ return_dict=False,
1829
+ )
1830
+
1831
+ noise_pred = self.unet(
1832
+ latent_model_input,
1833
+ t,
1834
+ encoder_hidden_states=prompt_embeds,
1835
+ cross_attention_kwargs=cross_attention_kwargs,
1836
+ added_cond_kwargs=added_cond_kwargs,
1837
+ return_dict=False,
1838
+ down_intrablock_additional_residuals=down_block_additional_residuals, # t2iadapter
1839
+ down_block_additional_residuals=down_block_res_samples, # controlnet
1840
+ mid_block_additional_residual=mid_block_res_sample, # controlnet
1841
+ )[0]
1842
+
1843
+ # perform guidance
1844
+ if do_classifier_free_guidance:
1845
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1846
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1847
+
1848
+ if do_classifier_free_guidance and guidance_rescale > 0.0:
1849
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1850
+ noise_pred = rescale_noise_cfg(
1851
+ noise_pred,
1852
+ noise_pred_text,
1853
+ guidance_rescale=guidance_rescale,
1854
+ )
1855
+
1856
+ # compute the previous noisy sample x_t -> x_t-1
1857
+ latents = self.scheduler.step(
1858
+ noise_pred,
1859
+ t,
1860
+ latents,
1861
+ **extra_step_kwargs,
1862
+ return_dict=False,
1863
+ )[0]
1864
+
1865
+ if num_channels_unet == 4:
1866
+ init_latents_proper = image_latents
1867
+ if do_classifier_free_guidance:
1868
+ init_mask, _ = mask.chunk(2)
1869
+ else:
1870
+ init_mask = mask
1871
+
1872
+ if i < len(timesteps) - 1:
1873
+ noise_timestep = timesteps[i + 1]
1874
+ init_latents_proper = self.scheduler.add_noise(
1875
+ init_latents_proper,
1876
+ noise,
1877
+ torch.tensor([noise_timestep]),
1878
+ )
1879
+
1880
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1881
+
1882
+ # call the callback, if provided
1883
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1884
+ progress_bar.update()
1885
+ if callback is not None and i % callback_steps == 0:
1886
+ callback(i, t, latents)
1887
+
1888
+ # make sure the VAE is in float32 mode, as it overflows in float16
1889
+ if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
1890
+ self.upcast_vae()
1891
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1892
+
1893
+ if output_type != "latent":
1894
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1895
+ else:
1896
+ image = latents
1897
+ return StableDiffusionXLPipelineOutput(images=image)
1898
+
1899
+ image = self.image_processor.postprocess(image, output_type=output_type)
1900
+
1901
+ # Offload last model to CPU
1902
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1903
+ self.final_offload_hook.offload()
1904
+
1905
+ if not return_dict:
1906
+ return (image,)
1907
+
1908
+ return StableDiffusionXLPipelineOutput(images=image)
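A minimal usage sketch for the ControlNet + T2I-Adapter inpaint pipeline whose denoising loop ends above. The checkpoint IDs and the `custom_pipeline` loading route shown here are illustrative assumptions, not something prescribed by the file itself; any SDXL base with a matching ControlNet / T2I-Adapter pair should work.

```py
import torch
from diffusers import ControlNetModel, DiffusionPipeline, T2IAdapter

# Illustrative (assumed) canny-conditioned checkpoints; swap in whichever pair fits your task.
controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16)

# The community pipeline file is selected by name via `custom_pipeline`; `controlnet`
# and `adapter` are handed to its __init__ as extra components.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    custom_pipeline="pipeline_stable_diffusion_xl_controlnet_adapter_inpaint",
    controlnet=controlnet,
    adapter=adapter,
    torch_dtype=torch.float16,
).to("cuda")
```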
v0.26.3/pipeline_stable_diffusion_xl_instantid.py ADDED
@@ -0,0 +1,1058 @@
1
+ # Copyright 2024 The InstantX Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import math
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import cv2
20
+ import numpy as np
21
+ import PIL.Image
22
+ import torch
23
+ import torch.nn as nn
24
+
25
+ from diffusers import StableDiffusionXLControlNetPipeline
26
+ from diffusers.image_processor import PipelineImageInput
27
+ from diffusers.models import ControlNetModel
28
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
29
+ from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
30
+ from diffusers.utils import (
31
+ deprecate,
32
+ logging,
33
+ replace_example_docstring,
34
+ )
35
+ from diffusers.utils.import_utils import is_xformers_available
36
+ from diffusers.utils.torch_utils import is_compiled_module, is_torch_version
37
+
38
+
39
+ try:
40
+ import xformers
41
+ import xformers.ops
42
+
43
+ xformers_available = True
44
+ except Exception:
45
+ xformers_available = False
46
+
47
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
48
+
49
+
50
+ def FeedForward(dim, mult=4):
51
+ inner_dim = int(dim * mult)
52
+ return nn.Sequential(
53
+ nn.LayerNorm(dim),
54
+ nn.Linear(dim, inner_dim, bias=False),
55
+ nn.GELU(),
56
+ nn.Linear(inner_dim, dim, bias=False),
57
+ )
58
+
59
+
60
+ def reshape_tensor(x, heads):
61
+ bs, length, width = x.shape
62
+ # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
63
+ x = x.view(bs, length, heads, -1)
64
+ # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
65
+ x = x.transpose(1, 2)
66
+ # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
67
+ x = x.reshape(bs, heads, length, -1)
68
+ return x
69
+
70
+
71
+ class PerceiverAttention(nn.Module):
72
+ def __init__(self, *, dim, dim_head=64, heads=8):
73
+ super().__init__()
74
+ self.scale = dim_head**-0.5
75
+ self.dim_head = dim_head
76
+ self.heads = heads
77
+ inner_dim = dim_head * heads
78
+
79
+ self.norm1 = nn.LayerNorm(dim)
80
+ self.norm2 = nn.LayerNorm(dim)
81
+
82
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
83
+ self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
84
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
85
+
86
+ def forward(self, x, latents):
87
+ """
88
+ Args:
89
+ x (torch.Tensor): image features
90
+ shape (b, n1, D)
91
+ latent (torch.Tensor): latent features
92
+ shape (b, n2, D)
93
+ """
94
+ x = self.norm1(x)
95
+ latents = self.norm2(latents)
96
+
97
+ b, l, _ = latents.shape
98
+
99
+ q = self.to_q(latents)
100
+ kv_input = torch.cat((x, latents), dim=-2)
101
+ k, v = self.to_kv(kv_input).chunk(2, dim=-1)
102
+
103
+ q = reshape_tensor(q, self.heads)
104
+ k = reshape_tensor(k, self.heads)
105
+ v = reshape_tensor(v, self.heads)
106
+
107
+ # attention
108
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
109
+ weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
110
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
111
+ out = weight @ v
112
+
113
+ out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
114
+
115
+ return self.to_out(out)
116
+
117
+
118
+ class Resampler(nn.Module):
119
+ def __init__(
120
+ self,
121
+ dim=1024,
122
+ depth=8,
123
+ dim_head=64,
124
+ heads=16,
125
+ num_queries=8,
126
+ embedding_dim=768,
127
+ output_dim=1024,
128
+ ff_mult=4,
129
+ ):
130
+ super().__init__()
131
+
132
+ self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
133
+
134
+ self.proj_in = nn.Linear(embedding_dim, dim)
135
+
136
+ self.proj_out = nn.Linear(dim, output_dim)
137
+ self.norm_out = nn.LayerNorm(output_dim)
138
+
139
+ self.layers = nn.ModuleList([])
140
+ for _ in range(depth):
141
+ self.layers.append(
142
+ nn.ModuleList(
143
+ [
144
+ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
145
+ FeedForward(dim=dim, mult=ff_mult),
146
+ ]
147
+ )
148
+ )
149
+
150
+ def forward(self, x):
151
+ latents = self.latents.repeat(x.size(0), 1, 1)
152
+ x = self.proj_in(x)
153
+
154
+ for attn, ff in self.layers:
155
+ latents = attn(x, latents) + latents
156
+ latents = ff(latents) + latents
157
+
158
+ latents = self.proj_out(latents)
159
+ return self.norm_out(latents)
160
+
161
+
162
+ class AttnProcessor(nn.Module):
163
+ r"""
164
+ Default processor for performing attention-related computations.
165
+ """
166
+
167
+ def __init__(
168
+ self,
169
+ hidden_size=None,
170
+ cross_attention_dim=None,
171
+ ):
172
+ super().__init__()
173
+
174
+ def __call__(
175
+ self,
176
+ attn,
177
+ hidden_states,
178
+ encoder_hidden_states=None,
179
+ attention_mask=None,
180
+ temb=None,
181
+ ):
182
+ residual = hidden_states
183
+
184
+ if attn.spatial_norm is not None:
185
+ hidden_states = attn.spatial_norm(hidden_states, temb)
186
+
187
+ input_ndim = hidden_states.ndim
188
+
189
+ if input_ndim == 4:
190
+ batch_size, channel, height, width = hidden_states.shape
191
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
192
+
193
+ batch_size, sequence_length, _ = (
194
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
195
+ )
196
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
197
+
198
+ if attn.group_norm is not None:
199
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
200
+
201
+ query = attn.to_q(hidden_states)
202
+
203
+ if encoder_hidden_states is None:
204
+ encoder_hidden_states = hidden_states
205
+ elif attn.norm_cross:
206
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
207
+
208
+ key = attn.to_k(encoder_hidden_states)
209
+ value = attn.to_v(encoder_hidden_states)
210
+
211
+ query = attn.head_to_batch_dim(query)
212
+ key = attn.head_to_batch_dim(key)
213
+ value = attn.head_to_batch_dim(value)
214
+
215
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
216
+ hidden_states = torch.bmm(attention_probs, value)
217
+ hidden_states = attn.batch_to_head_dim(hidden_states)
218
+
219
+ # linear proj
220
+ hidden_states = attn.to_out[0](hidden_states)
221
+ # dropout
222
+ hidden_states = attn.to_out[1](hidden_states)
223
+
224
+ if input_ndim == 4:
225
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
226
+
227
+ if attn.residual_connection:
228
+ hidden_states = hidden_states + residual
229
+
230
+ hidden_states = hidden_states / attn.rescale_output_factor
231
+
232
+ return hidden_states
233
+
234
+
235
+ class IPAttnProcessor(nn.Module):
236
+ r"""
237
+ Attention processor for IP-Adapter.
238
+ Args:
239
+ hidden_size (`int`):
240
+ The hidden size of the attention layer.
241
+ cross_attention_dim (`int`):
242
+ The number of channels in the `encoder_hidden_states`.
243
+ scale (`float`, defaults to 1.0):
244
+ the weight scale of image prompt.
245
+ num_tokens (`int`, defaults to 4 when do ip_adapter_plus it should be 16):
246
+ The context length of the image features.
247
+ """
248
+
249
+ def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4):
250
+ super().__init__()
251
+
252
+ self.hidden_size = hidden_size
253
+ self.cross_attention_dim = cross_attention_dim
254
+ self.scale = scale
255
+ self.num_tokens = num_tokens
256
+
257
+ self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
258
+ self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
259
+
260
+ def __call__(
261
+ self,
262
+ attn,
263
+ hidden_states,
264
+ encoder_hidden_states=None,
265
+ attention_mask=None,
266
+ temb=None,
267
+ ):
268
+ residual = hidden_states
269
+
270
+ if attn.spatial_norm is not None:
271
+ hidden_states = attn.spatial_norm(hidden_states, temb)
272
+
273
+ input_ndim = hidden_states.ndim
274
+
275
+ if input_ndim == 4:
276
+ batch_size, channel, height, width = hidden_states.shape
277
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
278
+
279
+ batch_size, sequence_length, _ = (
280
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
281
+ )
282
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
283
+
284
+ if attn.group_norm is not None:
285
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
286
+
287
+ query = attn.to_q(hidden_states)
288
+
289
+ if encoder_hidden_states is None:
290
+ encoder_hidden_states = hidden_states
291
+ else:
292
+ # get encoder_hidden_states, ip_hidden_states
293
+ end_pos = encoder_hidden_states.shape[1] - self.num_tokens
294
+ encoder_hidden_states, ip_hidden_states = (
295
+ encoder_hidden_states[:, :end_pos, :],
296
+ encoder_hidden_states[:, end_pos:, :],
297
+ )
298
+ if attn.norm_cross:
299
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
300
+
301
+ key = attn.to_k(encoder_hidden_states)
302
+ value = attn.to_v(encoder_hidden_states)
303
+
304
+ query = attn.head_to_batch_dim(query)
305
+ key = attn.head_to_batch_dim(key)
306
+ value = attn.head_to_batch_dim(value)
307
+
308
+ if xformers_available:
309
+ hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
310
+ else:
311
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
312
+ hidden_states = torch.bmm(attention_probs, value)
313
+ hidden_states = attn.batch_to_head_dim(hidden_states)
314
+
315
+ # for ip-adapter
316
+ ip_key = self.to_k_ip(ip_hidden_states)
317
+ ip_value = self.to_v_ip(ip_hidden_states)
318
+
319
+ ip_key = attn.head_to_batch_dim(ip_key)
320
+ ip_value = attn.head_to_batch_dim(ip_value)
321
+
322
+ if xformers_available:
323
+ ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None)
324
+ else:
325
+ ip_attention_probs = attn.get_attention_scores(query, ip_key, None)
326
+ ip_hidden_states = torch.bmm(ip_attention_probs, ip_value)
327
+ ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states)
328
+
329
+ hidden_states = hidden_states + self.scale * ip_hidden_states
330
+
331
+ # linear proj
332
+ hidden_states = attn.to_out[0](hidden_states)
333
+ # dropout
334
+ hidden_states = attn.to_out[1](hidden_states)
335
+
336
+ if input_ndim == 4:
337
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
338
+
339
+ if attn.residual_connection:
340
+ hidden_states = hidden_states + residual
341
+
342
+ hidden_states = hidden_states / attn.rescale_output_factor
343
+
344
+ return hidden_states
345
+
346
+ def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
347
+ # TODO attention_mask
348
+ query = query.contiguous()
349
+ key = key.contiguous()
350
+ value = value.contiguous()
351
+ hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
352
+ return hidden_states
353
+
354
+
355
+ EXAMPLE_DOC_STRING = """
356
+ Examples:
357
+ ```py
358
+ >>> # !pip install opencv-python transformers accelerate insightface
359
+ >>> import diffusers
360
+ >>> from diffusers.utils import load_image
361
+ >>> from diffusers.models import ControlNetModel
362
+
363
+ >>> import cv2
364
+ >>> import torch
365
+ >>> import numpy as np
366
+ >>> from PIL import Image
367
+
368
+ >>> from insightface.app import FaceAnalysis
369
+ >>> from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps
370
+
371
+ >>> # download 'antelopev2' under ./models
372
+ >>> app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
373
+ >>> app.prepare(ctx_id=0, det_size=(640, 640))
374
+
375
+ >>> # download models under ./checkpoints
376
+ >>> face_adapter = f'./checkpoints/ip-adapter.bin'
377
+ >>> controlnet_path = f'./checkpoints/ControlNetModel'
378
+
379
+ >>> # load IdentityNet
380
+ >>> controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
381
+
382
+ >>> pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
383
+ ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
384
+ ... )
385
+ >>> pipe.cuda()
386
+
387
+ >>> # load adapter
388
+ >>> pipe.load_ip_adapter_instantid(face_adapter)
389
+
390
+ >>> prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality"
391
+ >>> negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured"
392
+
393
+ >>> # load an image
394
+ >>> face_image = load_image("your-example.jpg")
395
+
396
+ >>> face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))[-1]
397
+ >>> face_emb = face_info['embedding']
398
+ >>> face_kps = draw_kps(face_image, face_info['kps'])
399
+
400
+ >>> pipe.set_ip_adapter_scale(0.8)
401
+
402
+ >>> # generate image
403
+ >>> image = pipe(
404
+ ... prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8
405
+ ... ).images[0]
406
+ ```
407
+ """
408
+
409
+
410
+ def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
411
+ stickwidth = 4
412
+ limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
413
+ kps = np.array(kps)
414
+
415
+ w, h = image_pil.size
416
+ out_img = np.zeros([h, w, 3])
417
+
418
+ for i in range(len(limbSeq)):
419
+ index = limbSeq[i]
420
+ color = color_list[index[0]]
421
+
422
+ x = kps[index][:, 0]
423
+ y = kps[index][:, 1]
424
+ length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
425
+ angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
426
+ polygon = cv2.ellipse2Poly(
427
+ (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1
428
+ )
429
+ out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
430
+ out_img = (out_img * 0.6).astype(np.uint8)
431
+
432
+ for idx_kp, kp in enumerate(kps):
433
+ color = color_list[idx_kp]
434
+ x, y = kp
435
+ out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
436
+
437
+ out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
438
+ return out_img_pil
439
+
440
+
441
+ class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
442
+ def cuda(self, dtype=torch.float16, use_xformers=False):
443
+ self.to("cuda", dtype)
444
+
445
+ if hasattr(self, "image_proj_model"):
446
+ self.image_proj_model.to(self.unet.device).to(self.unet.dtype)
447
+
448
+ if use_xformers:
449
+ if is_xformers_available():
450
+ import xformers
451
+ from packaging import version
452
+
453
+ xformers_version = version.parse(xformers.__version__)
454
+ if xformers_version == version.parse("0.0.16"):
455
+ logger.warn(
456
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
457
+ )
458
+ self.enable_xformers_memory_efficient_attention()
459
+ else:
460
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
461
+
462
+ def load_ip_adapter_instantid(self, model_ckpt, image_emb_dim=512, num_tokens=16, scale=0.5):
463
+ self.set_image_proj_model(model_ckpt, image_emb_dim, num_tokens)
464
+ self.set_ip_adapter(model_ckpt, num_tokens, scale)
465
+
466
+ def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16):
467
+ image_proj_model = Resampler(
468
+ dim=1280,
469
+ depth=4,
470
+ dim_head=64,
471
+ heads=20,
472
+ num_queries=num_tokens,
473
+ embedding_dim=image_emb_dim,
474
+ output_dim=self.unet.config.cross_attention_dim,
475
+ ff_mult=4,
476
+ )
477
+
478
+ image_proj_model.eval()
479
+
480
+ self.image_proj_model = image_proj_model.to(self.device, dtype=self.dtype)
481
+ state_dict = torch.load(model_ckpt, map_location="cpu")
482
+ if "image_proj" in state_dict:
483
+ state_dict = state_dict["image_proj"]
484
+ self.image_proj_model.load_state_dict(state_dict)
485
+
486
+ self.image_proj_model_in_features = image_emb_dim
487
+
488
+ def set_ip_adapter(self, model_ckpt, num_tokens, scale):
489
+ unet = self.unet
490
+ attn_procs = {}
491
+ for name in unet.attn_processors.keys():
492
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
493
+ if name.startswith("mid_block"):
494
+ hidden_size = unet.config.block_out_channels[-1]
495
+ elif name.startswith("up_blocks"):
496
+ block_id = int(name[len("up_blocks.")])
497
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
498
+ elif name.startswith("down_blocks"):
499
+ block_id = int(name[len("down_blocks.")])
500
+ hidden_size = unet.config.block_out_channels[block_id]
501
+ if cross_attention_dim is None:
502
+ attn_procs[name] = AttnProcessor().to(unet.device, dtype=unet.dtype)
503
+ else:
504
+ attn_procs[name] = IPAttnProcessor(
505
+ hidden_size=hidden_size,
506
+ cross_attention_dim=cross_attention_dim,
507
+ scale=scale,
508
+ num_tokens=num_tokens,
509
+ ).to(unet.device, dtype=unet.dtype)
510
+ unet.set_attn_processor(attn_procs)
511
+
512
+ state_dict = torch.load(model_ckpt, map_location="cpu")
513
+ ip_layers = torch.nn.ModuleList(self.unet.attn_processors.values())
514
+ if "ip_adapter" in state_dict:
515
+ state_dict = state_dict["ip_adapter"]
516
+ ip_layers.load_state_dict(state_dict)
517
+
518
+ def set_ip_adapter_scale(self, scale):
519
+ unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
520
+ for attn_processor in unet.attn_processors.values():
521
+ if isinstance(attn_processor, IPAttnProcessor):
522
+ attn_processor.scale = scale
523
+
524
+ def _encode_prompt_image_emb(self, prompt_image_emb, device, dtype, do_classifier_free_guidance):
525
+ if isinstance(prompt_image_emb, torch.Tensor):
526
+ prompt_image_emb = prompt_image_emb.clone().detach()
527
+ else:
528
+ prompt_image_emb = torch.tensor(prompt_image_emb)
529
+
530
+ prompt_image_emb = prompt_image_emb.to(device=device, dtype=dtype)
531
+ prompt_image_emb = prompt_image_emb.reshape([1, -1, self.image_proj_model_in_features])
532
+
533
+ if do_classifier_free_guidance:
534
+ prompt_image_emb = torch.cat([torch.zeros_like(prompt_image_emb), prompt_image_emb], dim=0)
535
+ else:
536
+ prompt_image_emb = torch.cat([prompt_image_emb], dim=0)
537
+
538
+ prompt_image_emb = self.image_proj_model(prompt_image_emb)
539
+ return prompt_image_emb
540
+
541
+ @torch.no_grad()
542
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
543
+ def __call__(
544
+ self,
545
+ prompt: Union[str, List[str]] = None,
546
+ prompt_2: Optional[Union[str, List[str]]] = None,
547
+ image: PipelineImageInput = None,
548
+ height: Optional[int] = None,
549
+ width: Optional[int] = None,
550
+ num_inference_steps: int = 50,
551
+ guidance_scale: float = 5.0,
552
+ negative_prompt: Optional[Union[str, List[str]]] = None,
553
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
554
+ num_images_per_prompt: Optional[int] = 1,
555
+ eta: float = 0.0,
556
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
557
+ latents: Optional[torch.FloatTensor] = None,
558
+ prompt_embeds: Optional[torch.FloatTensor] = None,
559
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
560
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
561
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
562
+ image_embeds: Optional[torch.FloatTensor] = None,
563
+ output_type: Optional[str] = "pil",
564
+ return_dict: bool = True,
565
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
566
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
567
+ guess_mode: bool = False,
568
+ control_guidance_start: Union[float, List[float]] = 0.0,
569
+ control_guidance_end: Union[float, List[float]] = 1.0,
570
+ original_size: Tuple[int, int] = None,
571
+ crops_coords_top_left: Tuple[int, int] = (0, 0),
572
+ target_size: Tuple[int, int] = None,
573
+ negative_original_size: Optional[Tuple[int, int]] = None,
574
+ negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
575
+ negative_target_size: Optional[Tuple[int, int]] = None,
576
+ clip_skip: Optional[int] = None,
577
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
578
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
579
+ **kwargs,
580
+ ):
581
+ r"""
582
+ The call function to the pipeline for generation.
583
+
584
+ Args:
585
+ prompt (`str` or `List[str]`, *optional*):
586
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
587
+ prompt_2 (`str` or `List[str]`, *optional*):
588
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
589
+ used in both text-encoders.
590
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
591
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
592
+ The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
593
+ specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be
594
+ accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height
595
+ and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in
596
+ `init`, images must be passed as a list such that each element of the list can be correctly batched for
597
+ input to a single ControlNet.
598
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
599
+ The height in pixels of the generated image. Anything below 512 pixels won't work well for
600
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
601
+ and checkpoints that are not specifically fine-tuned on low resolutions.
602
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
603
+ The width in pixels of the generated image. Anything below 512 pixels won't work well for
604
+ [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
605
+ and checkpoints that are not specifically fine-tuned on low resolutions.
606
+ num_inference_steps (`int`, *optional*, defaults to 50):
607
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
608
+ expense of slower inference.
609
+ guidance_scale (`float`, *optional*, defaults to 5.0):
610
+ A higher guidance scale value encourages the model to generate images closely linked to the text
611
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
612
+ negative_prompt (`str` or `List[str]`, *optional*):
613
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
614
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
615
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
616
+ The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2`
617
+ and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
618
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
619
+ The number of images to generate per prompt.
620
+ eta (`float`, *optional*, defaults to 0.0):
621
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
622
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
623
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
624
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
625
+ generation deterministic.
626
+ latents (`torch.FloatTensor`, *optional*):
627
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
628
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
629
+ tensor is generated by sampling using the supplied random `generator`.
630
+ prompt_embeds (`torch.FloatTensor`, *optional*):
631
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
632
+ provided, text embeddings are generated from the `prompt` input argument.
633
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
634
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
635
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
636
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
637
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
638
+ not provided, pooled text embeddings are generated from `prompt` input argument.
639
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
640
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt
641
+ weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input
642
+ argument.
643
+ image_embeds (`torch.FloatTensor`, *optional*):
644
+ Pre-generated image embeddings.
645
+ output_type (`str`, *optional*, defaults to `"pil"`):
646
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
647
+ return_dict (`bool`, *optional*, defaults to `True`):
648
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
649
+ plain tuple.
650
+ cross_attention_kwargs (`dict`, *optional*):
651
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
652
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
653
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
654
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
655
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
656
+ the corresponding scale as a list.
657
+ guess_mode (`bool`, *optional*, defaults to `False`):
658
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
659
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
660
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
661
+ The percentage of total steps at which the ControlNet starts applying.
662
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
663
+ The percentage of total steps at which the ControlNet stops applying.
664
+ original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
665
+ If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
666
+ `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
667
+ explained in section 2.2 of
668
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
669
+ crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
670
+ `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
671
+ `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
672
+ `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
673
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
674
+ target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
675
+ For most cases, `target_size` should be set to the desired height and width of the generated image. If
676
+ not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
677
+ section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
678
+ negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
679
+ To negatively condition the generation process based on a specific image resolution. Part of SDXL's
680
+ micro-conditioning as explained in section 2.2 of
681
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
682
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
683
+ negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
684
+ To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
685
+ micro-conditioning as explained in section 2.2 of
686
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
687
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
688
+ negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
689
+ To negatively condition the generation process based on a target image resolution. It should be the same
690
+ as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
691
+ [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
692
+ information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
693
+ clip_skip (`int`, *optional*):
694
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
695
+ the output of the pre-final layer will be used for computing the prompt embeddings.
696
+ callback_on_step_end (`Callable`, *optional*):
697
+ A function that is called at the end of each denoising step during inference. The function is called
698
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
699
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
700
+ `callback_on_step_end_tensor_inputs`.
701
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
702
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
703
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
704
+ `._callback_tensor_inputs` attribute of your pipeline class.
705
+
706
+ Examples:
707
+
708
+ Returns:
709
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
710
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
711
+ otherwise a `tuple` is returned containing the output images.
712
+ """
713
+
714
+ callback = kwargs.pop("callback", None)
715
+ callback_steps = kwargs.pop("callback_steps", None)
716
+
717
+ if callback is not None:
718
+ deprecate(
719
+ "callback",
720
+ "1.0.0",
721
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
722
+ )
723
+ if callback_steps is not None:
724
+ deprecate(
725
+ "callback_steps",
726
+ "1.0.0",
727
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
728
+ )
729
+
730
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
731
+
732
+ # align format for control guidance
733
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
734
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
735
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
736
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
737
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
738
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
739
+ control_guidance_start, control_guidance_end = (
740
+ mult * [control_guidance_start],
741
+ mult * [control_guidance_end],
742
+ )
743
+
744
+ # 1. Check inputs. Raise error if not correct
745
+ self.check_inputs(
746
+ prompt,
747
+ prompt_2,
748
+ image,
749
+ callback_steps,
750
+ negative_prompt,
751
+ negative_prompt_2,
752
+ prompt_embeds,
753
+ negative_prompt_embeds,
754
+ pooled_prompt_embeds,
755
+ negative_pooled_prompt_embeds,
756
+ controlnet_conditioning_scale,
757
+ control_guidance_start,
758
+ control_guidance_end,
759
+ callback_on_step_end_tensor_inputs,
760
+ )
761
+
762
+ self._guidance_scale = guidance_scale
763
+ self._clip_skip = clip_skip
764
+ self._cross_attention_kwargs = cross_attention_kwargs
765
+
766
+ # 2. Define call parameters
767
+ if prompt is not None and isinstance(prompt, str):
768
+ batch_size = 1
769
+ elif prompt is not None and isinstance(prompt, list):
770
+ batch_size = len(prompt)
771
+ else:
772
+ batch_size = prompt_embeds.shape[0]
773
+
774
+ device = self._execution_device
775
+
776
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
777
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
778
+
779
+ global_pool_conditions = (
780
+ controlnet.config.global_pool_conditions
781
+ if isinstance(controlnet, ControlNetModel)
782
+ else controlnet.nets[0].config.global_pool_conditions
783
+ )
784
+ guess_mode = guess_mode or global_pool_conditions
785
+
786
+ # 3.1 Encode input prompt
787
+ text_encoder_lora_scale = (
788
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
789
+ )
790
+ (
791
+ prompt_embeds,
792
+ negative_prompt_embeds,
793
+ pooled_prompt_embeds,
794
+ negative_pooled_prompt_embeds,
795
+ ) = self.encode_prompt(
796
+ prompt,
797
+ prompt_2,
798
+ device,
799
+ num_images_per_prompt,
800
+ self.do_classifier_free_guidance,
801
+ negative_prompt,
802
+ negative_prompt_2,
803
+ prompt_embeds=prompt_embeds,
804
+ negative_prompt_embeds=negative_prompt_embeds,
805
+ pooled_prompt_embeds=pooled_prompt_embeds,
806
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
807
+ lora_scale=text_encoder_lora_scale,
808
+ clip_skip=self.clip_skip,
809
+ )
810
+
811
+ # 3.2 Encode image prompt
812
+ prompt_image_emb = self._encode_prompt_image_emb(
813
+ image_embeds, device, self.unet.dtype, self.do_classifier_free_guidance
814
+ )
815
+
816
+ # 4. Prepare image
817
+ if isinstance(controlnet, ControlNetModel):
818
+ image = self.prepare_image(
819
+ image=image,
820
+ width=width,
821
+ height=height,
822
+ batch_size=batch_size * num_images_per_prompt,
823
+ num_images_per_prompt=num_images_per_prompt,
824
+ device=device,
825
+ dtype=controlnet.dtype,
826
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
827
+ guess_mode=guess_mode,
828
+ )
829
+ height, width = image.shape[-2:]
830
+ elif isinstance(controlnet, MultiControlNetModel):
831
+ images = []
832
+
833
+ for image_ in image:
834
+ image_ = self.prepare_image(
835
+ image=image_,
836
+ width=width,
837
+ height=height,
838
+ batch_size=batch_size * num_images_per_prompt,
839
+ num_images_per_prompt=num_images_per_prompt,
840
+ device=device,
841
+ dtype=controlnet.dtype,
842
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
843
+ guess_mode=guess_mode,
844
+ )
845
+
846
+ images.append(image_)
847
+
848
+ image = images
849
+ height, width = image[0].shape[-2:]
850
+ else:
851
+ assert False
852
+
853
+ # 5. Prepare timesteps
854
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
855
+ timesteps = self.scheduler.timesteps
856
+ self._num_timesteps = len(timesteps)
857
+
858
+ # 6. Prepare latent variables
859
+ num_channels_latents = self.unet.config.in_channels
860
+ latents = self.prepare_latents(
861
+ batch_size * num_images_per_prompt,
862
+ num_channels_latents,
863
+ height,
864
+ width,
865
+ prompt_embeds.dtype,
866
+ device,
867
+ generator,
868
+ latents,
869
+ )
870
+
871
+ # 6.5 Optionally get Guidance Scale Embedding
872
+ timestep_cond = None
873
+ if self.unet.config.time_cond_proj_dim is not None:
874
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
875
+ timestep_cond = self.get_guidance_scale_embedding(
876
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
877
+ ).to(device=device, dtype=latents.dtype)
878
+
879
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
880
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
881
+
882
+ # 7.1 Create tensor stating which controlnets to keep
883
+ controlnet_keep = []
884
+ for i in range(len(timesteps)):
885
+ keeps = [
886
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
887
+ for s, e in zip(control_guidance_start, control_guidance_end)
888
+ ]
889
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
890
+
891
+ # 7.2 Prepare added time ids & embeddings
892
+ if isinstance(image, list):
893
+ original_size = original_size or image[0].shape[-2:]
894
+ else:
895
+ original_size = original_size or image.shape[-2:]
896
+ target_size = target_size or (height, width)
897
+
898
+ add_text_embeds = pooled_prompt_embeds
899
+ if self.text_encoder_2 is None:
900
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
901
+ else:
902
+ text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
903
+
904
+ add_time_ids = self._get_add_time_ids(
905
+ original_size,
906
+ crops_coords_top_left,
907
+ target_size,
908
+ dtype=prompt_embeds.dtype,
909
+ text_encoder_projection_dim=text_encoder_projection_dim,
910
+ )
911
+
912
+ if negative_original_size is not None and negative_target_size is not None:
913
+ negative_add_time_ids = self._get_add_time_ids(
914
+ negative_original_size,
915
+ negative_crops_coords_top_left,
916
+ negative_target_size,
917
+ dtype=prompt_embeds.dtype,
918
+ text_encoder_projection_dim=text_encoder_projection_dim,
919
+ )
920
+ else:
921
+ negative_add_time_ids = add_time_ids
922
+
923
+ if self.do_classifier_free_guidance:
924
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
925
+ add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
926
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)
927
+
928
+ prompt_embeds = prompt_embeds.to(device)
929
+ add_text_embeds = add_text_embeds.to(device)
930
+ add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
931
+ encoder_hidden_states = torch.cat([prompt_embeds, prompt_image_emb], dim=1)
932
+
933
+ # 8. Denoising loop
934
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
935
+ is_unet_compiled = is_compiled_module(self.unet)
936
+ is_controlnet_compiled = is_compiled_module(self.controlnet)
937
+ is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
938
+
939
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
940
+ for i, t in enumerate(timesteps):
941
+ # Relevant thread:
942
+ # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
943
+ if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
944
+ torch._inductor.cudagraph_mark_step_begin()
945
+ # expand the latents if we are doing classifier free guidance
946
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
947
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
948
+
949
+ added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
950
+
951
+ # controlnet(s) inference
952
+ if guess_mode and self.do_classifier_free_guidance:
953
+ # Infer ControlNet only for the conditional batch.
954
+ control_model_input = latents
955
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
956
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
957
+ controlnet_added_cond_kwargs = {
958
+ "text_embeds": add_text_embeds.chunk(2)[1],
959
+ "time_ids": add_time_ids.chunk(2)[1],
960
+ }
961
+ else:
962
+ control_model_input = latent_model_input
963
+ controlnet_prompt_embeds = prompt_embeds
964
+ controlnet_added_cond_kwargs = added_cond_kwargs
965
+
966
+ if isinstance(controlnet_keep[i], list):
967
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
968
+ else:
969
+ controlnet_cond_scale = controlnet_conditioning_scale
970
+ if isinstance(controlnet_cond_scale, list):
971
+ controlnet_cond_scale = controlnet_cond_scale[0]
972
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
973
+
974
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
975
+ control_model_input,
976
+ t,
977
+ encoder_hidden_states=prompt_image_emb,
978
+ controlnet_cond=image,
979
+ conditioning_scale=cond_scale,
980
+ guess_mode=guess_mode,
981
+ added_cond_kwargs=controlnet_added_cond_kwargs,
982
+ return_dict=False,
983
+ )
984
+
985
+ if guess_mode and self.do_classifier_free_guidance:
986
+ # Inferred ControlNet only for the conditional batch.
987
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
988
+ # add 0 to the unconditional batch to keep it unchanged.
989
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
990
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
991
+
992
+ # predict the noise residual
993
+ noise_pred = self.unet(
994
+ latent_model_input,
995
+ t,
996
+ encoder_hidden_states=encoder_hidden_states,
997
+ timestep_cond=timestep_cond,
998
+ cross_attention_kwargs=self.cross_attention_kwargs,
999
+ down_block_additional_residuals=down_block_res_samples,
1000
+ mid_block_additional_residual=mid_block_res_sample,
1001
+ added_cond_kwargs=added_cond_kwargs,
1002
+ return_dict=False,
1003
+ )[0]
1004
+
1005
+ # perform guidance
1006
+ if self.do_classifier_free_guidance:
1007
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1008
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1009
+
1010
+ # compute the previous noisy sample x_t -> x_t-1
1011
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1012
+
1013
+ if callback_on_step_end is not None:
1014
+ callback_kwargs = {}
1015
+ for k in callback_on_step_end_tensor_inputs:
1016
+ callback_kwargs[k] = locals()[k]
1017
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1018
+
1019
+ latents = callback_outputs.pop("latents", latents)
1020
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1021
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1022
+
1023
+ # call the callback, if provided
1024
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1025
+ progress_bar.update()
1026
+ if callback is not None and i % callback_steps == 0:
1027
+ step_idx = i // getattr(self.scheduler, "order", 1)
1028
+ callback(step_idx, t, latents)
1029
+
1030
+ if not output_type == "latent":
1031
+ # make sure the VAE is in float32 mode, as it overflows in float16
1032
+ needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
1033
+ if needs_upcasting:
1034
+ self.upcast_vae()
1035
+ latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
1036
+
1037
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1038
+
1039
+ # cast back to fp16 if needed
1040
+ if needs_upcasting:
1041
+ self.vae.to(dtype=torch.float16)
1042
+ else:
1043
+ image = latents
1044
+
1045
+ if not output_type == "latent":
1046
+ # apply watermark if available
1047
+ if self.watermark is not None:
1048
+ image = self.watermark.apply_watermark(image)
1049
+
1050
+ image = self.image_processor.postprocess(image, output_type=output_type)
1051
+
1052
+ # Offload all models
1053
+ self.maybe_free_model_hooks()
1054
+
1055
+ if not return_dict:
1056
+ return (image,)
1057
+
1058
+ return StableDiffusionXLPipelineOutput(images=image)
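A minimal sketch of the keypoint map that `draw_kps` above renders for the ControlNet branch. The coordinates below are hypothetical; real values come from the insightface detector, as in the example docstring, in the order left eye, right eye, nose, left and right mouth corner.

```py
import numpy as np
import PIL.Image

from pipeline_stable_diffusion_xl_instantid import draw_kps

# Hypothetical 5-point face layout (x, y) on a blank 640x640 canvas.
canvas = PIL.Image.new("RGB", (640, 640))
kps = np.array([[220, 260], [420, 260], [320, 360], [250, 460], [390, 460]])
face_kps = draw_kps(canvas, kps)  # PIL image, later passed to the pipeline as `image=face_kps`
```

Together with the `image_embeds` face embedding from the same detection, this mirrors the identity conditioning used in the example docstring above.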
v0.26.3/pipeline_zero1to3.py ADDED
@@ -0,0 +1,893 @@
1
+ # A diffusers implementation of Zero-1-to-3 (https://github.com/cvlab-columbia/zero123), ICCV 2023
2
+ # by Xin Kong
3
+
4
+ import inspect
5
+ from typing import Any, Callable, Dict, List, Optional, Union
6
+
7
+ import kornia
8
+ import numpy as np
9
+ import PIL.Image
10
+ import torch
11
+ from packaging import version
12
+ from transformers import CLIPFeatureExtractor, CLIPVisionModelWithProjection
13
+
14
+ # from ...configuration_utils import FrozenDict
15
+ # from ...models import AutoencoderKL, UNet2DConditionModel
16
+ # from ...schedulers import KarrasDiffusionSchedulers
17
+ # from ...utils import (
18
+ # deprecate,
19
+ # is_accelerate_available,
20
+ # is_accelerate_version,
21
+ # logging,
22
+ # randn_tensor,
23
+ # replace_example_docstring,
24
+ # )
25
+ # from ..pipeline_utils import DiffusionPipeline
26
+ # from . import StableDiffusionPipelineOutput
27
+ # from .safety_checker import StableDiffusionSafetyChecker
28
+ from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
29
+ from diffusers.configuration_utils import ConfigMixin, FrozenDict
30
+ from diffusers.models.modeling_utils import ModelMixin
31
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
32
+ from diffusers.schedulers import KarrasDiffusionSchedulers
33
+ from diffusers.utils import (
34
+ deprecate,
35
+ is_accelerate_available,
36
+ is_accelerate_version,
37
+ logging,
38
+ replace_example_docstring,
39
+ )
40
+ from diffusers.utils.torch_utils import randn_tensor
41
+
42
+
43
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
44
+ # todo
45
+ EXAMPLE_DOC_STRING = """
46
+ Examples:
47
+ ```py
48
+ >>> import torch
49
+ >>> from diffusers import StableDiffusionPipeline
50
+
51
+ >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
52
+ >>> pipe = pipe.to("cuda")
53
+
54
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
55
+ >>> image = pipe(prompt).images[0]
56
+ ```
57
+ """
58
+
59
+
60
+ class CCProjection(ModelMixin, ConfigMixin):
61
+ def __init__(self, in_channel=772, out_channel=768):
62
+ super().__init__()
63
+ self.in_channel = in_channel
64
+ self.out_channel = out_channel
65
+ self.projection = torch.nn.Linear(in_channel, out_channel)
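+ # Note: the default in_channel of 772 is the 768-dim CLIP image embedding concatenated
+ # with the 4-dim pose embedding produced in `_encode_pose` below.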
66
+
67
+ def forward(self, x):
68
+ return self.projection(x)
69
+
70
+
71
+ class Zero1to3StableDiffusionPipeline(DiffusionPipeline):
72
+ r"""
73
+ Pipeline for single view conditioned novel view generation using Zero1to3.
74
+
75
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
76
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
77
+
78
+ Args:
79
+ vae ([`AutoencoderKL`]):
80
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
81
+ image_encoder ([`CLIPVisionModelWithProjection`]):
82
+ Frozen CLIP image-encoder. Stable Diffusion Image Variation uses the vision portion of
83
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPVisionModelWithProjection),
84
+ specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
85
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
86
+ scheduler ([`SchedulerMixin`]):
87
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
88
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
89
+ safety_checker ([`StableDiffusionSafetyChecker`]):
90
+ Classification module that estimates whether generated images could be considered offensive or harmful.
91
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
92
+ feature_extractor ([`CLIPFeatureExtractor`]):
93
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
94
+ cc_projection ([`CCProjection`]):
95
+ Projection layer that projects the concatenated CLIP features and pose embeddings back to the original CLIP feature size.
96
+ """
97
+
98
+ _optional_components = ["safety_checker", "feature_extractor"]
99
+
100
+ def __init__(
101
+ self,
102
+ vae: AutoencoderKL,
103
+ image_encoder: CLIPVisionModelWithProjection,
104
+ unet: UNet2DConditionModel,
105
+ scheduler: KarrasDiffusionSchedulers,
106
+ safety_checker: StableDiffusionSafetyChecker,
107
+ feature_extractor: CLIPFeatureExtractor,
108
+ cc_projection: CCProjection,
109
+ requires_safety_checker: bool = True,
110
+ ):
111
+ super().__init__()
112
+
113
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
114
+ deprecation_message = (
115
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
116
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
117
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
118
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
119
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
120
+ " file"
121
+ )
122
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
123
+ new_config = dict(scheduler.config)
124
+ new_config["steps_offset"] = 1
125
+ scheduler._internal_dict = FrozenDict(new_config)
126
+
127
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
128
+ deprecation_message = (
129
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
130
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
131
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
132
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
133
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
134
+ )
135
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
136
+ new_config = dict(scheduler.config)
137
+ new_config["clip_sample"] = False
138
+ scheduler._internal_dict = FrozenDict(new_config)
139
+
140
+ if safety_checker is None and requires_safety_checker:
141
+ logger.warning(
142
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
143
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
144
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
145
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
146
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
147
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
148
+ )
149
+
150
+ if safety_checker is not None and feature_extractor is None:
151
+ raise ValueError(
152
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
153
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
154
+ )
155
+
156
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
157
+ version.parse(unet.config._diffusers_version).base_version
158
+ ) < version.parse("0.9.0.dev0")
159
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
160
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
161
+ deprecation_message = (
162
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
163
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
164
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
165
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
166
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
167
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
168
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
169
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
170
+ " the `unet/config.json` file"
171
+ )
172
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
173
+ new_config = dict(unet.config)
174
+ new_config["sample_size"] = 64
175
+ unet._internal_dict = FrozenDict(new_config)
176
+
177
+ self.register_modules(
178
+ vae=vae,
179
+ image_encoder=image_encoder,
180
+ unet=unet,
181
+ scheduler=scheduler,
182
+ safety_checker=safety_checker,
183
+ feature_extractor=feature_extractor,
184
+ cc_projection=cc_projection,
185
+ )
186
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
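+ # Note: for the standard Stable Diffusion VAE (4 blocks) this evaluates to 8, i.e. latents are 1/8 the pixel resolution.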
187
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
188
+ # self.model_mode = None
189
+
190
+ def enable_vae_slicing(self):
191
+ r"""
192
+ Enable sliced VAE decoding.
193
+
194
+ When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
195
+ steps. This is useful to save some memory and allow larger batch sizes.
196
+ """
197
+ self.vae.enable_slicing()
198
+
199
+ def disable_vae_slicing(self):
200
+ r"""
201
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
202
+ computing decoding in one step.
203
+ """
204
+ self.vae.disable_slicing()
205
+
206
+ def enable_vae_tiling(self):
207
+ r"""
208
+ Enable tiled VAE decoding.
209
+
210
+ When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
211
+ several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
212
+ """
213
+ self.vae.enable_tiling()
214
+
215
+ def disable_vae_tiling(self):
216
+ r"""
217
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
218
+ computing decoding in one step.
219
+ """
220
+ self.vae.disable_tiling()
221
+
222
+ def enable_sequential_cpu_offload(self, gpu_id=0):
223
+ r"""
224
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
225
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
226
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
227
+ Note that offloading happens on a submodule basis. Memory savings are higher than with
228
+ `enable_model_cpu_offload`, but performance is lower.
229
+ """
230
+ if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
231
+ from accelerate import cpu_offload
232
+ else:
233
+ raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
234
+
235
+ device = torch.device(f"cuda:{gpu_id}")
236
+
237
+ if self.device.type != "cpu":
238
+ self.to("cpu", silence_dtype_warnings=True)
239
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
240
+
241
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
242
+ cpu_offload(cpu_offloaded_model, device)
243
+
244
+ if self.safety_checker is not None:
245
+ cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
246
+
247
+ def enable_model_cpu_offload(self, gpu_id=0):
248
+ r"""
249
+ Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
250
+ to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
251
+ method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
252
+ `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
253
+ """
254
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
255
+ from accelerate import cpu_offload_with_hook
256
+ else:
257
+ raise ImportError("`enable_model_offload` requires `accelerate v0.17.0` or higher.")
258
+
259
+ device = torch.device(f"cuda:{gpu_id}")
260
+
261
+ if self.device.type != "cpu":
262
+ self.to("cpu", silence_dtype_warnings=True)
263
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
264
+
265
+ hook = None
266
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
267
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
268
+
269
+ if self.safety_checker is not None:
270
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
271
+
272
+ # We'll offload the last model manually.
273
+ self.final_offload_hook = hook
274
+
275
+ @property
276
+ def _execution_device(self):
277
+ r"""
278
+ Returns the device on which the pipeline's models will be executed. After calling
279
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
280
+ hooks.
281
+ """
282
+ if not hasattr(self.unet, "_hf_hook"):
283
+ return self.device
284
+ for module in self.unet.modules():
285
+ if (
286
+ hasattr(module, "_hf_hook")
287
+ and hasattr(module._hf_hook, "execution_device")
288
+ and module._hf_hook.execution_device is not None
289
+ ):
290
+ return torch.device(module._hf_hook.execution_device)
291
+ return self.device
292
+
293
+ def _encode_prompt(
294
+ self,
295
+ prompt,
296
+ device,
297
+ num_images_per_prompt,
298
+ do_classifier_free_guidance,
299
+ negative_prompt=None,
300
+ prompt_embeds: Optional[torch.FloatTensor] = None,
301
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
302
+ ):
303
+ r"""
304
+ Encodes the prompt into text encoder hidden states.
305
+
306
+ Args:
307
+ prompt (`str` or `List[str]`, *optional*):
308
+ prompt to be encoded
309
+ device: (`torch.device`):
310
+ torch device
311
+ num_images_per_prompt (`int`):
312
+ number of images that should be generated per prompt
313
+ do_classifier_free_guidance (`bool`):
314
+ whether to use classifier free guidance or not
315
+ negative_prompt (`str` or `List[str]`, *optional*):
316
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
317
+ `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
318
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
319
+ prompt_embeds (`torch.FloatTensor`, *optional*):
320
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
321
+ provided, text embeddings will be generated from `prompt` input argument.
322
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
323
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
324
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
325
+ argument.
326
+ """
327
+ if prompt is not None and isinstance(prompt, str):
328
+ batch_size = 1
329
+ elif prompt is not None and isinstance(prompt, list):
330
+ batch_size = len(prompt)
331
+ else:
332
+ batch_size = prompt_embeds.shape[0]
333
+
334
+ if prompt_embeds is None:
335
+ text_inputs = self.tokenizer(
336
+ prompt,
337
+ padding="max_length",
338
+ max_length=self.tokenizer.model_max_length,
339
+ truncation=True,
340
+ return_tensors="pt",
341
+ )
342
+ text_input_ids = text_inputs.input_ids
343
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
344
+
345
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
346
+ text_input_ids, untruncated_ids
347
+ ):
348
+ removed_text = self.tokenizer.batch_decode(
349
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
350
+ )
351
+ logger.warning(
352
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
353
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
354
+ )
355
+
356
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
357
+ attention_mask = text_inputs.attention_mask.to(device)
358
+ else:
359
+ attention_mask = None
360
+
361
+ prompt_embeds = self.text_encoder(
362
+ text_input_ids.to(device),
363
+ attention_mask=attention_mask,
364
+ )
365
+ prompt_embeds = prompt_embeds[0]
366
+
367
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
368
+
369
+ bs_embed, seq_len, _ = prompt_embeds.shape
370
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
371
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
372
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
373
+
374
+ # get unconditional embeddings for classifier free guidance
375
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
376
+ uncond_tokens: List[str]
377
+ if negative_prompt is None:
378
+ uncond_tokens = [""] * batch_size
379
+ elif type(prompt) is not type(negative_prompt):
380
+ raise TypeError(
381
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
382
+ f" {type(prompt)}."
383
+ )
384
+ elif isinstance(negative_prompt, str):
385
+ uncond_tokens = [negative_prompt]
386
+ elif batch_size != len(negative_prompt):
387
+ raise ValueError(
388
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
389
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
390
+ " the batch size of `prompt`."
391
+ )
392
+ else:
393
+ uncond_tokens = negative_prompt
394
+
395
+ max_length = prompt_embeds.shape[1]
396
+ uncond_input = self.tokenizer(
397
+ uncond_tokens,
398
+ padding="max_length",
399
+ max_length=max_length,
400
+ truncation=True,
401
+ return_tensors="pt",
402
+ )
403
+
404
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
405
+ attention_mask = uncond_input.attention_mask.to(device)
406
+ else:
407
+ attention_mask = None
408
+
409
+ negative_prompt_embeds = self.text_encoder(
410
+ uncond_input.input_ids.to(device),
411
+ attention_mask=attention_mask,
412
+ )
413
+ negative_prompt_embeds = negative_prompt_embeds[0]
414
+
415
+ if do_classifier_free_guidance:
416
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
417
+ seq_len = negative_prompt_embeds.shape[1]
418
+
419
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
420
+
421
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
422
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
423
+
424
+ # For classifier free guidance, we need to do two forward passes.
425
+ # Here we concatenate the unconditional and text embeddings into a single batch
426
+ # to avoid doing two forward passes
427
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
428
+
429
+ return prompt_embeds
430
+
431
+ def CLIP_preprocess(self, x):
432
+ dtype = x.dtype
433
+ # following openai's implementation
434
+ # TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741
435
+ # follow OpenAI's preprocessing exactly: the input tensor must be in [-1, 1], otherwise the result differs, see https://github.com/huggingface/transformers/pull/22608
436
+ if isinstance(x, torch.Tensor):
437
+ if x.min() < -1.0 or x.max() > 1.0:
438
+ raise ValueError("Expected input tensor to have values in the range [-1, 1]")
439
+ x = kornia.geometry.resize(
440
+ x.to(torch.float32), (224, 224), interpolation="bicubic", align_corners=True, antialias=False
441
+ ).to(dtype=dtype)
442
+ x = (x + 1.0) / 2.0
443
+ # renormalize according to clip
444
+ x = kornia.enhance.normalize(
445
+ x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])
446
+ )
447
+ return x
448
+
449
+ # from image_variation
450
+ def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
451
+ dtype = next(self.image_encoder.parameters()).dtype
452
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
453
+ raise ValueError(
454
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
455
+ )
456
+
457
+ if isinstance(image, torch.Tensor):
458
+ # Batch single image
459
+ if image.ndim == 3:
460
+ assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
461
+ image = image.unsqueeze(0)
462
+
463
+ assert image.ndim == 4, "Image must have 4 dimensions"
464
+
465
+ # Check image is in [-1, 1]
466
+ if image.min() < -1 or image.max() > 1:
467
+ raise ValueError("Image should be in [-1, 1] range")
468
+ else:
469
+ # preprocess image
470
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
471
+ image = [image]
472
+
473
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
474
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
475
+ image = np.concatenate(image, axis=0)
476
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
477
+ image = np.concatenate([i[None, :] for i in image], axis=0)
478
+
479
+ image = image.transpose(0, 3, 1, 2)
480
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
481
+
482
+ image = image.to(device=device, dtype=dtype)
483
+
484
+ image = self.CLIP_preprocess(image)
485
+ # if not isinstance(image, torch.Tensor):
486
+ # # 0-255
487
+ # print("Warning: image is processed by hf's preprocess, which is different from openai original's.")
488
+ # image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
489
+ image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)
490
+ image_embeddings = image_embeddings.unsqueeze(1)
491
+
492
+ # duplicate image embeddings for each generation per prompt, using mps friendly method
493
+ bs_embed, seq_len, _ = image_embeddings.shape
494
+ image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
495
+ image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
496
+
497
+ if do_classifier_free_guidance:
498
+ negative_prompt_embeds = torch.zeros_like(image_embeddings)
499
+
500
+ # For classifier free guidance, we need to do two forward passes.
501
+ # Here we concatenate the unconditional and text embeddings into a single batch
502
+ # to avoid doing two forward passes
503
+ image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
504
+
505
+ return image_embeddings
506
+
507
+ def _encode_pose(self, pose, device, num_images_per_prompt, do_classifier_free_guidance):
508
+ dtype = next(self.cc_projection.parameters()).dtype
509
+ if isinstance(pose, torch.Tensor):
510
+ pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)
511
+ else:
512
+ if isinstance(pose[0], list):
513
+ pose = torch.Tensor(pose)
514
+ else:
515
+ pose = torch.Tensor([pose])
516
+ x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)
517
+ pose_embeddings = (
518
+ torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)
519
+ .unsqueeze(1)
520
+ .to(device=device, dtype=dtype)
521
+ ) # B, 1, 4
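+ # Note: the pose (x, y, z) corresponds to (elevation angle, azimuth angle, radius) in Zero-1-to-3;
+ # the azimuth is embedded as sin/cos so the encoding stays continuous across the 0/360-degree wrap-around.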
522
+ # duplicate pose embeddings for each generation per prompt, using mps friendly method
523
+ bs_embed, seq_len, _ = pose_embeddings.shape
524
+ pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)
525
+ pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
526
+ if do_classifier_free_guidance:
527
+ negative_prompt_embeds = torch.zeros_like(pose_embeddings)
528
+
529
+ # For classifier free guidance, we need to do two forward passes.
530
+ # Here we concatenate the unconditional and text embeddings into a single batch
531
+ # to avoid doing two forward passes
532
+ pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])
533
+ return pose_embeddings
534
+
535
+ def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_classifier_free_guidance):
536
+ img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)
537
+ pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)
538
+ prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)
539
+ prompt_embeds = self.cc_projection(prompt_embeds)
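+ # Note: the 772-dim concatenation of the CLIP image embedding (768) and the pose embedding (4)
+ # is projected back to 768 dims so it can serve as cross-attention conditioning for the UNet.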
540
+ # prompt_embeds = img_prompt_embeds
541
+ # follow 0123, add negative prompt, after projection
542
+ if do_classifier_free_guidance:
543
+ negative_prompt = torch.zeros_like(prompt_embeds)
544
+ prompt_embeds = torch.cat([negative_prompt, prompt_embeds])
545
+ return prompt_embeds
546
+
547
+ def run_safety_checker(self, image, device, dtype):
548
+ if self.safety_checker is not None:
549
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
550
+ image, has_nsfw_concept = self.safety_checker(
551
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
552
+ )
553
+ else:
554
+ has_nsfw_concept = None
555
+ return image, has_nsfw_concept
556
+
557
+ def decode_latents(self, latents):
558
+ latents = 1 / self.vae.config.scaling_factor * latents
559
+ image = self.vae.decode(latents).sample
560
+ image = (image / 2 + 0.5).clamp(0, 1)
561
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
562
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
563
+ return image
564
+
565
+ def prepare_extra_step_kwargs(self, generator, eta):
566
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
567
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
568
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
569
+ # and should be between [0, 1]
570
+
571
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
572
+ extra_step_kwargs = {}
573
+ if accepts_eta:
574
+ extra_step_kwargs["eta"] = eta
575
+
576
+ # check if the scheduler accepts generator
577
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
578
+ if accepts_generator:
579
+ extra_step_kwargs["generator"] = generator
580
+ return extra_step_kwargs
581
+
582
+ def check_inputs(self, image, height, width, callback_steps):
583
+ if (
584
+ not isinstance(image, torch.Tensor)
585
+ and not isinstance(image, PIL.Image.Image)
586
+ and not isinstance(image, list)
587
+ ):
588
+ raise ValueError(
589
+ "`image` has to be of type `torch.FloatTensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is"
590
+ f" {type(image)}"
591
+ )
592
+
593
+ if height % 8 != 0 or width % 8 != 0:
594
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
595
+
596
+ if (callback_steps is None) or (
597
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
598
+ ):
599
+ raise ValueError(
600
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
601
+ f" {type(callback_steps)}."
602
+ )
603
+
604
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
605
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
606
+ if isinstance(generator, list) and len(generator) != batch_size:
607
+ raise ValueError(
608
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
609
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
610
+ )
611
+
612
+ if latents is None:
613
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
614
+ else:
615
+ latents = latents.to(device)
616
+
617
+ # scale the initial noise by the standard deviation required by the scheduler
618
+ latents = latents * self.scheduler.init_noise_sigma
619
+ return latents
620
+
621
+ def prepare_img_latents(self, image, batch_size, dtype, device, generator=None, do_classifier_free_guidance=False):
622
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
623
+ raise ValueError(
624
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
625
+ )
626
+
627
+ if isinstance(image, torch.Tensor):
628
+ # Batch single image
629
+ if image.ndim == 3:
630
+ assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
631
+ image = image.unsqueeze(0)
632
+
633
+ assert image.ndim == 4, "Image must have 4 dimensions"
634
+
635
+ # Check image is in [-1, 1]
636
+ if image.min() < -1 or image.max() > 1:
637
+ raise ValueError("Image should be in [-1, 1] range")
638
+ else:
639
+ # preprocess image
640
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
641
+ image = [image]
642
+
643
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
644
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
645
+ image = np.concatenate(image, axis=0)
646
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
647
+ image = np.concatenate([i[None, :] for i in image], axis=0)
648
+
649
+ image = image.transpose(0, 3, 1, 2)
650
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
651
+
652
+ image = image.to(device=device, dtype=dtype)
653
+
654
+ if isinstance(generator, list) and len(generator) != batch_size:
655
+ raise ValueError(
656
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
657
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
658
+ )
659
+
660
+ if isinstance(generator, list):
661
+ init_latents = [
662
+ self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i])
663
+ for i in range(batch_size) # sample
664
+ ]
665
+ init_latents = torch.cat(init_latents, dim=0)
666
+ else:
667
+ init_latents = self.vae.encode(image).latent_dist.mode()
668
+
669
+ # init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor
670
+ if batch_size > init_latents.shape[0]:
671
+ # init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)
672
+ num_images_per_prompt = batch_size // init_latents.shape[0]
673
+ # duplicate image latents for each generation per prompt, using mps friendly method
674
+ bs_embed, emb_c, emb_h, emb_w = init_latents.shape
675
+ init_latents = init_latents.unsqueeze(1)
676
+ init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)
677
+ init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)
678
+
679
+ # init_latents = torch.cat([init_latents]*2) if do_classifier_free_guidance else init_latents # follow zero123
680
+ init_latents = (
681
+ torch.cat([torch.zeros_like(init_latents), init_latents]) if do_classifier_free_guidance else init_latents
682
+ )
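+ # Note: under classifier-free guidance the unconditional half of the batch receives all-zero
+ # image latents, mirroring the zeroed prompt embedding used in `_encode_image_with_pose`.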
683
+
684
+ init_latents = init_latents.to(device=device, dtype=dtype)
685
+ return init_latents
686
+
687
+ # def load_cc_projection(self, pretrained_weights=None):
688
+ # self.cc_projection = torch.nn.Linear(772, 768)
689
+ # torch.nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
690
+ # torch.nn.init.zeros_(list(self.cc_projection.parameters())[1])
691
+ # if pretrained_weights is not None:
692
+ # self.cc_projection.load_state_dict(pretrained_weights)
693
+
694
+ @torch.no_grad()
695
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
696
+ def __call__(
697
+ self,
698
+ input_imgs: Union[torch.FloatTensor, PIL.Image.Image] = None,
699
+ prompt_imgs: Union[torch.FloatTensor, PIL.Image.Image] = None,
700
+ poses: Union[List[float], List[List[float]]] = None,
701
+ torch_dtype=torch.float32,
702
+ height: Optional[int] = None,
703
+ width: Optional[int] = None,
704
+ num_inference_steps: int = 50,
705
+ guidance_scale: float = 3.0,
706
+ negative_prompt: Optional[Union[str, List[str]]] = None,
707
+ num_images_per_prompt: Optional[int] = 1,
708
+ eta: float = 0.0,
709
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
710
+ latents: Optional[torch.FloatTensor] = None,
711
+ prompt_embeds: Optional[torch.FloatTensor] = None,
712
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
713
+ output_type: Optional[str] = "pil",
714
+ return_dict: bool = True,
715
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
716
+ callback_steps: int = 1,
717
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
718
+ controlnet_conditioning_scale: float = 1.0,
719
+ ):
720
+ r"""
721
+ Function invoked when calling the pipeline for generation.
722
+
723
+ Args:
724
+ input_imgs (`PIL` or `List[PIL]`, *optional*):
725
+ The single input image for each 3D object
726
+ prompt_imgs (`PIL` or `List[PIL]`, *optional*):
727
+ Same as input_imgs, but will be used later as an image prompt condition, encoded by CLIP feature
728
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
729
+ The height in pixels of the generated image.
730
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
731
+ The width in pixels of the generated image.
732
+ num_inference_steps (`int`, *optional*, defaults to 50):
733
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
734
+ expense of slower inference.
735
+ guidance_scale (`float`, *optional*, defaults to 3.0):
736
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
737
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
738
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
739
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
740
+ usually at the expense of lower image quality.
741
+ negative_prompt (`str` or `List[str]`, *optional*):
742
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
743
+ `negative_prompt_embeds` instead.
744
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
745
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
746
+ The number of images to generate per prompt.
747
+ eta (`float`, *optional*, defaults to 0.0):
748
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
749
+ [`schedulers.DDIMScheduler`], will be ignored for others.
750
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
751
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
752
+ to make generation deterministic.
753
+ latents (`torch.FloatTensor`, *optional*):
754
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
755
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
756
+ tensor will be generated by sampling using the supplied random `generator`.
757
+ prompt_embeds (`torch.FloatTensor`, *optional*):
758
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
759
+ provided, text embeddings will be generated from `prompt` input argument.
760
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
761
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
762
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
763
+ argument.
764
+ output_type (`str`, *optional*, defaults to `"pil"`):
765
+ The output format of the generate image. Choose between
766
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
767
+ return_dict (`bool`, *optional*, defaults to `True`):
768
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
769
+ plain tuple.
770
+ callback (`Callable`, *optional*):
771
+ A function that will be called every `callback_steps` steps during inference. The function will be
772
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
773
+ callback_steps (`int`, *optional*, defaults to 1):
774
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
775
+ called at every step.
776
+ cross_attention_kwargs (`dict`, *optional*):
777
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
778
+ `self.processor` in
779
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
780
+
781
+ Examples:
782
+
783
+ Returns:
784
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
785
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
786
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
787
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
788
+ (nsfw) content, according to the `safety_checker`.
789
+ """
790
+ # 0. Default height and width to unet
791
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
792
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
793
+
794
+ # 1. Check inputs. Raise error if not correct
795
+ # input_image = hint_imgs
796
+ self.check_inputs(input_imgs, height, width, callback_steps)
797
+
798
+ # 2. Define call parameters
799
+ if isinstance(input_imgs, PIL.Image.Image):
800
+ batch_size = 1
801
+ elif isinstance(input_imgs, list):
802
+ batch_size = len(input_imgs)
803
+ else:
804
+ batch_size = input_imgs.shape[0]
805
+ device = self._execution_device
806
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
807
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
808
+ # corresponds to doing no classifier free guidance.
809
+ do_classifier_free_guidance = guidance_scale > 1.0
810
+
811
+ # 3. Encode input image with pose as prompt
812
+ prompt_embeds = self._encode_image_with_pose(
813
+ prompt_imgs, poses, device, num_images_per_prompt, do_classifier_free_guidance
814
+ )
815
+
816
+ # 4. Prepare timesteps
817
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
818
+ timesteps = self.scheduler.timesteps
819
+
820
+ # 5. Prepare latent variables
821
+ latents = self.prepare_latents(
822
+ batch_size * num_images_per_prompt,
823
+ 4,
824
+ height,
825
+ width,
826
+ prompt_embeds.dtype,
827
+ device,
828
+ generator,
829
+ latents,
830
+ )
831
+
832
+ # 6. Prepare image latents
833
+ img_latents = self.prepare_img_latents(
834
+ input_imgs,
835
+ batch_size * num_images_per_prompt,
836
+ prompt_embeds.dtype,
837
+ device,
838
+ generator,
839
+ do_classifier_free_guidance,
840
+ )
841
+
842
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
843
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
844
+
845
+ # 7. Denoising loop
846
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
847
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
848
+ for i, t in enumerate(timesteps):
849
+ # expand the latents if we are doing classifier free guidance
850
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
851
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
852
+ latent_model_input = torch.cat([latent_model_input, img_latents], dim=1)
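+ # Note: the Zero-1-to-3 UNet takes 8 input channels: the 4 noisy latent channels concatenated
+ # with the 4 latent channels of the clean conditioning image.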
853
+
854
+ # predict the noise residual
855
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
856
+
857
+ # perform guidance
858
+ if do_classifier_free_guidance:
859
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
860
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
861
+
862
+ # compute the previous noisy sample x_t -> x_t-1
863
+ # latents = self.scheduler.step(noise_pred.to(dtype=torch.float32), t, latents.to(dtype=torch.float32)).prev_sample.to(prompt_embeds.dtype)
864
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
865
+
866
+ # call the callback, if provided
867
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
868
+ progress_bar.update()
869
+ if callback is not None and i % callback_steps == 0:
870
+ step_idx = i // getattr(self.scheduler, "order", 1)
871
+ callback(step_idx, t, latents)
872
+
873
+ # 8. Post-processing
874
+ has_nsfw_concept = None
875
+ if output_type == "latent":
876
+ image = latents
877
+ elif output_type == "pil":
878
+ # 9. Post-processing
879
+ image = self.decode_latents(latents)
880
+ # 10. Convert to PIL
881
+ image = self.numpy_to_pil(image)
882
+ else:
883
+ # 9. Post-processing
884
+ image = self.decode_latents(latents)
885
+
886
+ # Offload last model to CPU
887
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
888
+ self.final_offload_hook.offload()
889
+
890
+ if not return_dict:
891
+ return (image, has_nsfw_concept)
892
+
893
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
v0.26.3/regional_prompting_stable_diffusion.py ADDED
@@ -0,0 +1,620 @@
1
+ import math
2
+ from typing import Dict, Optional
3
+
4
+ import torch
5
+ import torchvision.transforms.functional as FF
6
+ from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
7
+
8
+ from diffusers import StableDiffusionPipeline
9
+ from diffusers.models import AutoencoderKL, UNet2DConditionModel
10
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
11
+ from diffusers.schedulers import KarrasDiffusionSchedulers
12
+ from diffusers.utils import USE_PEFT_BACKEND
13
+
14
+
15
+ try:
16
+ from compel import Compel
17
+ except ImportError:
18
+ Compel = None
19
+
20
+ KCOMM = "ADDCOMM"
21
+ KBRK = "BREAK"
22
+
23
+
24
+ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
25
+ r"""
26
+ Args for Regional Prompting Pipeline:
27
+ rp_args:dict
28
+ Required
29
+ rp_args["mode"]: cols, rows, prompt, prompt-ex
30
+ for cols, rows mode
31
+ rp_args["div"]: ex) 1;1;1(Divide into 3 regions)
32
+ for prompt, prompt-ex mode
33
+ rp_args["th"]: ex) 0.5,0.5,0.6 (threshold for prompt mode)
34
+
35
+ Optional
36
+ rp_args["save_mask"]: True/False (save masks in prompt mode)
37
+
38
+ Pipeline for text-to-image generation using Stable Diffusion.
39
+
40
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
41
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
42
+
43
+ Args:
44
+ vae ([`AutoencoderKL`]):
45
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
46
+ text_encoder ([`CLIPTextModel`]):
47
+ Frozen text-encoder. Stable Diffusion uses the text portion of
48
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
49
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
50
+ tokenizer (`CLIPTokenizer`):
51
+ Tokenizer of class
52
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
53
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
54
+ scheduler ([`SchedulerMixin`]):
55
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
56
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
57
+ safety_checker ([`StableDiffusionSafetyChecker`]):
58
+ Classification module that estimates whether generated images could be considered offensive or harmful.
59
+ Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
60
+ feature_extractor ([`CLIPImageProcessor`]):
61
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
62
+ """
63
+
64
+ def __init__(
65
+ self,
66
+ vae: AutoencoderKL,
67
+ text_encoder: CLIPTextModel,
68
+ tokenizer: CLIPTokenizer,
69
+ unet: UNet2DConditionModel,
70
+ scheduler: KarrasDiffusionSchedulers,
71
+ safety_checker: StableDiffusionSafetyChecker,
72
+ feature_extractor: CLIPFeatureExtractor,
73
+ requires_safety_checker: bool = True,
74
+ ):
75
+ super().__init__(
76
+ vae,
77
+ text_encoder,
78
+ tokenizer,
79
+ unet,
80
+ scheduler,
81
+ safety_checker,
82
+ feature_extractor,
83
+ requires_safety_checker,
84
+ )
85
+ self.register_modules(
86
+ vae=vae,
87
+ text_encoder=text_encoder,
88
+ tokenizer=tokenizer,
89
+ unet=unet,
90
+ scheduler=scheduler,
91
+ safety_checker=safety_checker,
92
+ feature_extractor=feature_extractor,
93
+ )
94
+
95
+ @torch.no_grad()
96
+ def __call__(
97
+ self,
98
+ prompt: str,
99
+ height: int = 512,
100
+ width: int = 512,
101
+ num_inference_steps: int = 50,
102
+ guidance_scale: float = 7.5,
103
+ negative_prompt: str = None,
104
+ num_images_per_prompt: Optional[int] = 1,
105
+ eta: float = 0.0,
106
+ generator: Optional[torch.Generator] = None,
107
+ latents: Optional[torch.FloatTensor] = None,
108
+ output_type: Optional[str] = "pil",
109
+ return_dict: bool = True,
110
+ rp_args: Dict[str, str] = None,
111
+ ):
112
+ active = KBRK in prompt[0] if isinstance(prompt, list) else KBRK in prompt
113
+ if negative_prompt is None:
114
+ negative_prompt = "" if isinstance(prompt, str) else [""] * len(prompt)
115
+
116
+ device = self._execution_device
117
+ regions = 0
118
+
119
+ self.power = int(rp_args["power"]) if "power" in rp_args else 1
120
+
121
+ prompts = prompt if isinstance(prompt, list) else [prompt]
122
+ n_prompts = negative_prompt if isinstance(prompt, str) else [negative_prompt]
123
+ self.batch = batch = num_images_per_prompt * len(prompts)
124
+ all_prompts_cn, all_prompts_p = promptsmaker(prompts, num_images_per_prompt)
125
+ all_n_prompts_cn, _ = promptsmaker(n_prompts, num_images_per_prompt)
126
+
127
+ equal = len(all_prompts_cn) == len(all_n_prompts_cn)
128
+
129
+ if Compel:
130
+ compel = Compel(tokenizer=self.tokenizer, text_encoder=self.text_encoder)
131
+
132
+ def getcompelembs(prps):
133
+ embl = []
134
+ for prp in prps:
135
+ embl.append(compel.build_conditioning_tensor(prp))
136
+ return torch.cat(embl)
137
+
138
+ conds = getcompelembs(all_prompts_cn)
139
+ unconds = getcompelembs(all_n_prompts_cn)
140
+ embs = getcompelembs(prompts)
141
+ n_embs = getcompelembs(n_prompts)
142
+ prompt = negative_prompt = None
143
+ else:
144
+ conds = self.encode_prompt(prompts, device, 1, True)[0]
145
+ unconds = (
146
+ self.encode_prompt(n_prompts, device, 1, True)[0]
147
+ if equal
148
+ else self.encode_prompt(all_n_prompts_cn, device, 1, True)[0]
149
+ )
150
+ embs = n_embs = None
151
+
152
+ if not active:
153
+ pcallback = None
154
+ mode = None
155
+ else:
156
+ if any(x in rp_args["mode"].upper() for x in ["COL", "ROW"]):
157
+ mode = "COL" if "COL" in rp_args["mode"].upper() else "ROW"
158
+ ocells, icells, regions = make_cells(rp_args["div"])
159
+
160
+ elif "PRO" in rp_args["mode"].upper():
161
+ regions = len(all_prompts_p[0])
162
+ mode = "PROMPT"
163
+ reset_attnmaps(self)
164
+ self.ex = "EX" in rp_args["mode"].upper()
165
+ self.target_tokens = target_tokens = tokendealer(self, all_prompts_p)
166
+ thresholds = [float(x) for x in rp_args["th"].split(",")]
167
+
168
+ orig_hw = (height, width)
169
+ revers = True
170
+
171
+ def pcallback(s_self, step: int, timestep: int, latents: torch.FloatTensor, selfs=None):
172
+ if "PRO" in mode: # in Prompt mode, make masks from sum of attension maps
173
+ self.step = step
174
+
175
+ if len(self.attnmaps_sizes) > 3:
176
+ self.history[step] = self.attnmaps.copy()
177
+ for hw in self.attnmaps_sizes:
178
+ allmasks = []
179
+ basemasks = [None] * batch
180
+ for tt, th in zip(target_tokens, thresholds):
181
+ for b in range(batch):
182
+ key = f"{tt}-{b}"
183
+ _, mask, _ = makepmask(self, self.attnmaps[key], hw[0], hw[1], th, step)
184
+ mask = mask.unsqueeze(0).unsqueeze(-1)
185
+ if self.ex:
186
+ allmasks[b::batch] = [x - mask for x in allmasks[b::batch]]
187
+ allmasks[b::batch] = [torch.where(x > 0, 1, 0) for x in allmasks[b::batch]]
188
+ allmasks.append(mask)
189
+ basemasks[b] = mask if basemasks[b] is None else basemasks[b] + mask
190
+ basemasks = [1 - mask for mask in basemasks]
191
+ basemasks = [torch.where(x > 0, 1, 0) for x in basemasks]
192
+ allmasks = basemasks + allmasks
193
+
194
+ self.attnmasks[hw] = torch.cat(allmasks)
195
+ self.maskready = True
196
+ return latents
197
+
198
+ def hook_forward(module):
199
+ # diffusers==0.23.2
200
+ def forward(
201
+ hidden_states: torch.FloatTensor,
202
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
203
+ attention_mask: Optional[torch.FloatTensor] = None,
204
+ temb: Optional[torch.FloatTensor] = None,
205
+ scale: float = 1.0,
206
+ ) -> torch.Tensor:
207
+ attn = module
208
+ xshape = hidden_states.shape
209
+ self.hw = (h, w) = split_dims(xshape[1], *orig_hw)
210
+
211
+ if revers:
212
+ nx, px = hidden_states.chunk(2)
213
+ else:
214
+ px, nx = hidden_states.chunk(2)
215
+
216
+ if equal:
217
+ hidden_states = torch.cat(
218
+ [px for i in range(regions)] + [nx for i in range(regions)],
219
+ 0,
220
+ )
221
+ encoder_hidden_states = torch.cat([conds] + [unconds])
222
+ else:
223
+ hidden_states = torch.cat([px for i in range(regions)] + [nx], 0)
224
+ encoder_hidden_states = torch.cat([conds] + [unconds])
225
+
226
+ residual = hidden_states
227
+
228
+ args = () if USE_PEFT_BACKEND else (scale,)
229
+
230
+ if attn.spatial_norm is not None:
231
+ hidden_states = attn.spatial_norm(hidden_states, temb)
232
+
233
+ input_ndim = hidden_states.ndim
234
+
235
+ if input_ndim == 4:
236
+ batch_size, channel, height, width = hidden_states.shape
237
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
238
+
239
+ batch_size, sequence_length, _ = (
240
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
241
+ )
242
+
243
+ if attention_mask is not None:
244
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
245
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
246
+
247
+ if attn.group_norm is not None:
248
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
249
+
250
+ args = () if USE_PEFT_BACKEND else (scale,)
251
+ query = attn.to_q(hidden_states, *args)
252
+
253
+ if encoder_hidden_states is None:
254
+ encoder_hidden_states = hidden_states
255
+ elif attn.norm_cross:
256
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
257
+
258
+ key = attn.to_k(encoder_hidden_states, *args)
259
+ value = attn.to_v(encoder_hidden_states, *args)
260
+
261
+ inner_dim = key.shape[-1]
262
+ head_dim = inner_dim // attn.heads
263
+
264
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
265
+
266
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
267
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
268
+
269
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
270
+ # TODO: add support for attn.scale when we move to Torch 2.1
271
+ hidden_states = scaled_dot_product_attention(
272
+ self,
273
+ query,
274
+ key,
275
+ value,
276
+ attn_mask=attention_mask,
277
+ dropout_p=0.0,
278
+ is_causal=False,
279
+ getattn="PRO" in mode,
280
+ )
281
+
282
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
283
+ hidden_states = hidden_states.to(query.dtype)
284
+
285
+ # linear proj
286
+ hidden_states = attn.to_out[0](hidden_states, *args)
287
+ # dropout
288
+ hidden_states = attn.to_out[1](hidden_states)
289
+
290
+ if input_ndim == 4:
291
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
292
+
293
+ if attn.residual_connection:
294
+ hidden_states = hidden_states + residual
295
+
296
+ hidden_states = hidden_states / attn.rescale_output_factor
297
+
298
+ #### Regional Prompting Col/Row mode
299
+ if any(x in mode for x in ["COL", "ROW"]):
300
+ reshaped = hidden_states.reshape(hidden_states.size()[0], h, w, hidden_states.size()[2])
301
+ center = reshaped.shape[0] // 2
302
+ px = reshaped[0:center] if equal else reshaped[0:-batch]
303
+ nx = reshaped[center:] if equal else reshaped[-batch:]
304
+ outs = [px, nx] if equal else [px]
305
+ for out in outs:
306
+ c = 0
307
+ for i, ocell in enumerate(ocells):
308
+ for icell in icells[i]:
309
+ if "ROW" in mode:
310
+ out[
311
+ 0:batch,
312
+ int(h * ocell[0]) : int(h * ocell[1]),
313
+ int(w * icell[0]) : int(w * icell[1]),
314
+ :,
315
+ ] = out[
316
+ c * batch : (c + 1) * batch,
317
+ int(h * ocell[0]) : int(h * ocell[1]),
318
+ int(w * icell[0]) : int(w * icell[1]),
319
+ :,
320
+ ]
321
+ else:
322
+ out[
323
+ 0:batch,
324
+ int(h * icell[0]) : int(h * icell[1]),
325
+ int(w * ocell[0]) : int(w * ocell[1]),
326
+ :,
327
+ ] = out[
328
+ c * batch : (c + 1) * batch,
329
+ int(h * icell[0]) : int(h * icell[1]),
330
+ int(w * ocell[0]) : int(w * ocell[1]),
331
+ :,
332
+ ]
333
+ c += 1
334
+ px, nx = (px[0:batch], nx[0:batch]) if equal else (px[0:batch], nx)
335
+ hidden_states = torch.cat([nx, px], 0) if revers else torch.cat([px, nx], 0)
336
+ hidden_states = hidden_states.reshape(xshape)
337
+
338
+ #### Regional Prompting Prompt mode
339
+ elif "PRO" in mode:
340
+ px, nx = (
341
+ torch.chunk(hidden_states) if equal else hidden_states[0:-batch],
342
+ hidden_states[-batch:],
343
+ )
344
+
345
+ if (h, w) in self.attnmasks and self.maskready:
346
+
347
+ def mask(input):
348
+ out = torch.multiply(input, self.attnmasks[(h, w)])
349
+ for b in range(batch):
350
+ for r in range(1, regions):
351
+ out[b] = out[b] + out[r * batch + b]
352
+ return out
353
+
354
+ px, nx = (mask(px), mask(nx)) if equal else (mask(px), nx)
355
+ px, nx = (px[0:batch], nx[0:batch]) if equal else (px[0:batch], nx)
356
+ hidden_states = torch.cat([nx, px], 0) if revers else torch.cat([px, nx], 0)
357
+ return hidden_states
358
+
359
+ return forward
360
+
361
+ def hook_forwards(root_module: torch.nn.Module):
362
+ for name, module in root_module.named_modules():
363
+ if "attn2" in name and module.__class__.__name__ == "Attention":
364
+ module.forward = hook_forward(module)
365
+
366
+ hook_forwards(self.unet)
367
+
368
+ output = StableDiffusionPipeline(**self.components)(
369
+ prompt=prompt,
370
+ prompt_embeds=embs,
371
+ negative_prompt=negative_prompt,
372
+ negative_prompt_embeds=n_embs,
373
+ height=height,
374
+ width=width,
375
+ num_inference_steps=num_inference_steps,
376
+ guidance_scale=guidance_scale,
377
+ num_images_per_prompt=num_images_per_prompt,
378
+ eta=eta,
379
+ generator=generator,
380
+ latents=latents,
381
+ output_type=output_type,
382
+ return_dict=return_dict,
383
+ callback_on_step_end=pcallback,
384
+ )
385
+
386
+ if "save_mask" in rp_args:
387
+ save_mask = rp_args["save_mask"]
388
+ else:
389
+ save_mask = False
390
+
391
+ if mode == "PROMPT" and save_mask:
392
+ saveattnmaps(
393
+ self,
394
+ output,
395
+ height,
396
+ width,
397
+ thresholds,
398
+ num_inference_steps // 2,
399
+ regions,
400
+ )
401
+
402
+ return output
403
+
404
+
405
+ ### Make prompt list for each region
406
+ def promptsmaker(prompts, batch):
407
+ out_p = []
408
+ plen = len(prompts)
409
+ for prompt in prompts:
410
+ add = ""
411
+ if KCOMM in prompt:
412
+ add, prompt = prompt.split(KCOMM)
413
+ add = add + " "
414
+ prompts = prompt.split(KBRK)
415
+ out_p.append([add + p for p in prompts])
416
+ out = [None] * batch * len(out_p[0]) * len(out_p)
417
+ for p, prs in enumerate(out_p): # inputs prompts
418
+ for r, pr in enumerate(prs): # prompts for regions
419
+ start = (p + r * plen) * batch
420
+ out[start : start + batch] = [pr] * batch # P1R1B1,P1R1B2...,P1R2B1,P1R2B2...,P2R1B1...
421
+ return out, out_p
422
+
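A worked illustration of the prompt expansion above (a minimal sketch; it assumes this module is importable and that `KBRK` and `KCOMM` are the `BREAK`/`ADDCOMM` keywords defined near the top of the file):

# One input prompt with a common part and two regions, duplicated for a batch of 2.
prompts = ["sunset ADDCOMM red hair BREAK blue eyes"]
flat, per_prompt = promptsmaker(prompts, batch=2)

assert len(per_prompt) == 1 and len(per_prompt[0]) == 2   # two region prompts, common part prepended
assert len(flat) == 2 * 2 * 1                             # batch * regions * input prompts
# flat is ordered P1R1B1, P1R1B2, P1R2B1, P1R2B2, ... as noted in the inline comment above.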
423
+
424
+ ### make regions from ratios
425
+ ### ";" makes outercells, "," makes inner cells
426
+ def make_cells(ratios):
427
+ if ";" not in ratios and "," in ratios:
428
+ ratios = ratios.replace(",", ";")
429
+ ratios = ratios.split(";")
430
+ ratios = [inratios.split(",") for inratios in ratios]
431
+
432
+ icells = []
433
+ ocells = []
434
+
435
+ def startend(cells, array):
436
+ current_start = 0
437
+ array = [float(x) for x in array]
438
+ for value in array:
439
+ end = current_start + (value / sum(array))
440
+ cells.append([current_start, end])
441
+ current_start = end
442
+
443
+ startend(ocells, [r[0] for r in ratios])
444
+
445
+ for inratios in ratios:
446
+ if 2 > len(inratios):
447
+ icells.append([[0, 1]])
448
+ else:
449
+ add = []
450
+ startend(add, inratios[1:])
451
+ icells.append(add)
452
+
453
+ return ocells, icells, sum(len(cell) for cell in icells)
454
+
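To make the ratio syntax concrete, here is a minimal sketch of `make_cells` on the string "1,2,1;2,3" (two outer cells split 1:2; the first subdivided 2:1, the second left whole), assuming the function above is in scope:

ocells, icells, n = make_cells("1,2,1;2,3")
# ocells == [[0.0, 0.333...], [0.333..., 1.0]]        outer split taken from the first value of each group
# icells == [[[0.0, 0.666...], [0.666..., 1.0]],      inner split of the first outer cell (2:1)
#            [[0.0, 1.0]]]                            the second outer cell keeps a single inner cell
assert n == 3 and len(ocells) == 2                    # n = total inner cells = number of region prompts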
455
+
456
+ def make_emblist(self, prompts):
457
+ with torch.no_grad():
458
+ tokens = self.tokenizer(
459
+ prompts,
460
+ max_length=self.tokenizer.model_max_length,
461
+ padding=True,
462
+ truncation=True,
463
+ return_tensors="pt",
464
+ ).input_ids.to(self.device)
465
+ embs = self.text_encoder(tokens, output_hidden_states=True).last_hidden_state.to(self.device, dtype=self.dtype)
466
+ return embs
467
+
468
+
469
+ def split_dims(xs, height, width):
470
+ xs = xs
471
+
472
+ def repeat_div(x, y):
473
+ while y > 0:
474
+ x = math.ceil(x / 2)
475
+ y = y - 1
476
+ return x
477
+
478
+ scale = math.ceil(math.log2(math.sqrt(height * width / xs)))
479
+ dsh = repeat_div(height, scale)
480
+ dsw = repeat_div(width, scale)
481
+ return dsh, dsw
482
+
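A worked example of the downscaling helper above (a sketch, assuming `split_dims` is in scope): for a 512x512 image whose self-attention hidden states have 4096 tokens (a 64x64 latent map), `scale = ceil(log2(sqrt(512*512/4096))) = 3`, so height and width are halved (with ceiling) three times:

dsh, dsw = split_dims(4096, 512, 512)
assert (dsh, dsw) == (64, 64)   # 512 -> 256 -> 128 -> 64 on both axes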
483
+
484
+ ##### for prompt mode
485
+ def get_attn_maps(self, attn):
486
+ height, width = self.hw
487
+ target_tokens = self.target_tokens
488
+ if (height, width) not in self.attnmaps_sizes:
489
+ self.attnmaps_sizes.append((height, width))
490
+
491
+ for b in range(self.batch):
492
+ for t in target_tokens:
493
+ power = self.power
494
+ add = attn[b, :, :, t[0] : t[0] + len(t)] ** (power) * (self.attnmaps_sizes.index((height, width)) + 1)
495
+ add = torch.sum(add, dim=2)
496
+ key = f"{t}-{b}"
497
+ if key not in self.attnmaps:
498
+ self.attnmaps[key] = add
499
+ else:
500
+ if self.attnmaps[key].shape[1] != add.shape[1]:
501
+ add = add.view(8, height, width)
502
+ add = FF.resize(add, self.attnmaps_sizes[0], antialias=None)
503
+ add = add.reshape_as(self.attnmaps[key])
504
+
505
+ self.attnmaps[key] = self.attnmaps[key] + add
506
+
507
+
508
+ def reset_attnmaps(self): # init parameters in every batch
509
+ self.step = 0
510
+ self.attnmaps = {} # made from attention maps
511
+ self.attnmaps_sizes = [] # (height, width) sizes of the U-Net blocks
512
+ self.attnmasks = {} # made from attnmaps for regions
513
+ self.maskready = False
514
+ self.history = {}
515
+
516
+
517
+ def saveattnmaps(self, output, h, w, th, step, regions):
518
+ masks = []
519
+ for i, mask in enumerate(self.history[step].values()):
520
+ img, _, mask = makepmask(self, mask, h, w, th[i % len(th)], step)
521
+ if self.ex:
522
+ masks = [x - mask for x in masks]
523
+ masks.append(mask)
524
+ if len(masks) == regions - 1:
525
+ output.images.extend([FF.to_pil_image(mask) for mask in masks])
526
+ masks = []
527
+ else:
528
+ output.images.append(img)
529
+
530
+
531
+ def makepmask(
532
+ self, mask, h, w, th, step
533
+ ): # make masks from the attention cache; returns [preview image, attention mask, latent mask]
534
+ th = th - step * 0.005
535
+ if 0.05 >= th:
536
+ th = 0.05
537
+ mask = torch.mean(mask, dim=0)
538
+ mask = mask / mask.max().item()
539
+ mask = torch.where(mask > th, 1, 0)
540
+ mask = mask.float()
541
+ mask = mask.view(1, *self.attnmaps_sizes[0])
542
+ img = FF.to_pil_image(mask)
543
+ img = img.resize((w, h))
544
+ mask = FF.resize(mask, (h, w), interpolation=FF.InterpolationMode.NEAREST, antialias=None)
545
+ lmask = mask
546
+ mask = mask.reshape(h * w)
547
+ mask = torch.where(mask > 0.1, 1, 0)
548
+ return img, mask, lmask
549
+
550
+
551
+ def tokendealer(self, all_prompts):
552
+ for prompts in all_prompts:
553
+ targets = [p.split(",")[-1] for p in prompts[1:]]
554
+ tt = []
555
+
556
+ for target in targets:
557
+ ptokens = (
558
+ self.tokenizer(
559
+ prompts,
560
+ max_length=self.tokenizer.model_max_length,
561
+ padding=True,
562
+ truncation=True,
563
+ return_tensors="pt",
564
+ ).input_ids
565
+ )[0]
566
+ ttokens = (
567
+ self.tokenizer(
568
+ target,
569
+ max_length=self.tokenizer.model_max_length,
570
+ padding=True,
571
+ truncation=True,
572
+ return_tensors="pt",
573
+ ).input_ids
574
+ )[0]
575
+
576
+ tlist = []
577
+
578
+ for t in range(ttokens.shape[0] - 2):
579
+ for p in range(ptokens.shape[0]):
580
+ if ttokens[t + 1] == ptokens[p]:
581
+ tlist.append(p)
582
+ if tlist != []:
583
+ tt.append(tlist)
584
+
585
+ return tt
586
+
587
+
588
+ def scaled_dot_product_attention(
589
+ self,
590
+ query,
591
+ key,
592
+ value,
593
+ attn_mask=None,
594
+ dropout_p=0.0,
595
+ is_causal=False,
596
+ scale=None,
597
+ getattn=False,
598
+ ) -> torch.Tensor:
599
+ # Efficient implementation equivalent to the following:
600
+ L, S = query.size(-2), key.size(-2)
601
+ scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale
602
+ attn_bias = torch.zeros(L, S, dtype=query.dtype, device=self.device)
603
+ if is_causal:
604
+ assert attn_mask is None
605
+ temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
606
+ attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf"))
607
+ attn_bias.to(query.dtype)
608
+
609
+ if attn_mask is not None:
610
+ if attn_mask.dtype == torch.bool:
611
+ attn_mask.masked_fill_(attn_mask.logical_not(), float("-inf"))
612
+ else:
613
+ attn_bias += attn_mask
614
+ attn_weight = query @ key.transpose(-2, -1) * scale_factor
615
+ attn_weight += attn_bias
616
+ attn_weight = torch.softmax(attn_weight, dim=-1)
617
+ if getattn:
618
+ get_attn_maps(self, attn_weight)
619
+ attn_weight = torch.dropout(attn_weight, dropout_p, train=True)
620
+ return attn_weight @ value
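The helper above follows the reference formulation documented for `torch.nn.functional.scaled_dot_product_attention`, but materializes the softmax weights so `get_attn_maps` can cache them when `getattn=True`. A minimal shape check (a sketch with illustrative dimensions; the dummy object only supplies the `device` attribute the helper reads, and `getattn` stays `False` so no attention maps are cached):

import torch

class _Dummy:
    device = "cpu"   # only attribute touched when getattn=False

q = torch.randn(2, 8, 64, 40)   # (batch, heads, query_len, head_dim)
k = torch.randn(2, 8, 77, 40)   # (batch, heads, key_len, head_dim)
v = torch.randn(2, 8, 77, 40)

out = scaled_dot_product_attention(_Dummy(), q, k, v)
assert out.shape == (2, 8, 64, 40)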
v0.26.3/rerender_a_video.py ADDED
@@ -0,0 +1,1178 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import sys
16
+ from dataclasses import dataclass
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ import torchvision.transforms as T
24
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
25
+
26
+ from diffusers.image_processor import VaeImageProcessor
27
+ from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
28
+ from diffusers.models.attention_processor import Attention, AttnProcessor
29
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
30
+ from diffusers.pipelines.controlnet.pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
31
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
32
+ from diffusers.schedulers import KarrasDiffusionSchedulers
33
+ from diffusers.utils import BaseOutput, deprecate, logging
34
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
35
+
36
+
37
+ gmflow_dir = "/path/to/gmflow"
38
+ sys.path.insert(0, gmflow_dir)
39
+ from gmflow.gmflow import GMFlow # noqa: E402
40
+
41
+ from utils.utils import InputPadder # noqa: E402
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+
47
+ def coords_grid(b, h, w, homogeneous=False, device=None):
48
+ y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W]
49
+
50
+ stacks = [x, y]
51
+
52
+ if homogeneous:
53
+ ones = torch.ones_like(x) # [H, W]
54
+ stacks.append(ones)
55
+
56
+ grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W]
57
+
58
+ grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W]
59
+
60
+ if device is not None:
61
+ grid = grid.to(device)
62
+
63
+ return grid
64
+
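A quick sanity sketch of the coordinate grid above (channel 0 carries x, channel 1 carries y, in pixel units):

import torch

grid = coords_grid(b=1, h=2, w=3)
assert grid.shape == (1, 2, 2, 3)                                    # [B, 2, H, W]
assert torch.equal(grid[0, 0, 0], torch.tensor([0.0, 1.0, 2.0]))     # x varies along the width
assert torch.equal(grid[0, 1, :, 0], torch.tensor([0.0, 1.0]))       # y varies along the height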
65
+
66
+ def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False):
67
+ # img: [B, C, H, W]
68
+ # sample_coords: [B, 2, H, W] in image scale
69
+ if sample_coords.size(1) != 2: # [B, H, W, 2]
70
+ sample_coords = sample_coords.permute(0, 3, 1, 2)
71
+
72
+ b, _, h, w = sample_coords.shape
73
+
74
+ # Normalize to [-1, 1]
75
+ x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1
76
+ y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1
77
+
78
+ grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2]
79
+
80
+ img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
81
+
82
+ if return_mask:
83
+ mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W]
84
+
85
+ return img, mask
86
+
87
+ return img
88
+
89
+
90
+ def flow_warp(feature, flow, mask=False, mode="bilinear", padding_mode="zeros"):
91
+ b, c, h, w = feature.size()
92
+ assert flow.size(1) == 2
93
+
94
+ grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W]
95
+ grid = grid.to(feature.dtype)
96
+ return bilinear_sample(feature, grid, mode=mode, padding_mode=padding_mode, return_mask=mask)
97
+
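A useful property of the warp above: a zero flow field is an identity warp, because bilinear sampling with `align_corners=True` at exact integer coordinates reproduces the input. A minimal sketch:

import torch

feature = torch.rand(1, 3, 8, 8)
warped = flow_warp(feature, torch.zeros(1, 2, 8, 8))
assert torch.allclose(warped, feature, atol=1e-5)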
98
+
99
+ def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5):
100
+ # fwd_flow, bwd_flow: [B, 2, H, W]
101
+ # alpha and beta values are following UnFlow
102
+ # (https://arxiv.org/abs/1711.07837)
103
+ assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4
104
+ assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2
105
+ flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W]
106
+
107
+ warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W]
108
+ warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W]
109
+
110
+ diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W]
111
+ diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)
112
+
113
+ threshold = alpha * flow_mag + beta
114
+
115
+ fwd_occ = (diff_fwd > threshold).float() # [B, H, W]
116
+ bwd_occ = (diff_bwd > threshold).float()
117
+
118
+ return fwd_occ, bwd_occ
119
+
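In words, a pixel is marked occluded when |f_fwd + warp(f_bwd)| exceeds alpha * (|f_fwd| + |f_bwd|) + beta. Two trivially consistent zero flows therefore yield empty occlusion masks (a minimal sketch):

import torch

fwd = torch.zeros(1, 2, 8, 8)
bwd = torch.zeros(1, 2, 8, 8)
fwd_occ, bwd_occ = forward_backward_consistency_check(fwd, bwd)
assert fwd_occ.sum() == 0 and bwd_occ.sum() == 0   # threshold reduces to beta = 0.5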
120
+
121
+ @torch.no_grad()
122
+ def get_warped_and_mask(flow_model, image1, image2, image3=None, pixel_consistency=False):
123
+ if image3 is None:
124
+ image3 = image1
125
+ padder = InputPadder(image1.shape, padding_factor=8)
126
+ image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
127
+ results_dict = flow_model(
128
+ image1, image2, attn_splits_list=[2], corr_radius_list=[-1], prop_radius_list=[-1], pred_bidir_flow=True
129
+ )
130
+ flow_pr = results_dict["flow_preds"][-1] # [B, 2, H, W]
131
+ fwd_flow = padder.unpad(flow_pr[0]).unsqueeze(0) # [1, 2, H, W]
132
+ bwd_flow = padder.unpad(flow_pr[1]).unsqueeze(0) # [1, 2, H, W]
133
+ fwd_occ, bwd_occ = forward_backward_consistency_check(fwd_flow, bwd_flow) # [1, H, W] float
134
+ if pixel_consistency:
135
+ warped_image1 = flow_warp(image1, bwd_flow)
136
+ bwd_occ = torch.clamp(
137
+ bwd_occ + (abs(image2 - warped_image1).mean(dim=1) > 255 * 0.25).float(), 0, 1
138
+ ).unsqueeze(0)
139
+ warped_results = flow_warp(image3, bwd_flow)
140
+ return warped_results, bwd_occ, bwd_flow
141
+
142
+
143
+ blur = T.GaussianBlur(kernel_size=(9, 9), sigma=(18, 18))
144
+
145
+
146
+ @dataclass
147
+ class TextToVideoSDPipelineOutput(BaseOutput):
148
+ """
149
+ Output class for text-to-video pipelines.
150
+
151
+ Args:
152
+ frames (`List[np.ndarray]` or `torch.FloatTensor`):
153
+ List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or as
154
+ a `torch` tensor. The length of the list denotes the video length (the number of frames).
155
+ """
156
+
157
+ frames: Union[List[np.ndarray], torch.FloatTensor]
158
+
159
+
160
+ @torch.no_grad()
161
+ def find_flat_region(mask):
162
+ device = mask.device
163
+ kernel_x = torch.Tensor([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]).unsqueeze(0).unsqueeze(0).to(device)
164
+ kernel_y = torch.Tensor([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]).unsqueeze(0).unsqueeze(0).to(device)
165
+ mask_ = F.pad(mask.unsqueeze(0), (1, 1, 1, 1), mode="replicate")
166
+
167
+ grad_x = torch.nn.functional.conv2d(mask_, kernel_x)
168
+ grad_y = torch.nn.functional.conv2d(mask_, kernel_y)
169
+ return ((abs(grad_x) + abs(grad_y)) == 0).float()[0]
170
+
171
+
172
+ class AttnState:
173
+ STORE = 0
174
+ LOAD = 1
175
+ LOAD_AND_STORE_PREV = 2
176
+
177
+ def __init__(self):
178
+ self.reset()
179
+
180
+ @property
181
+ def state(self):
182
+ return self.__state
183
+
184
+ @property
185
+ def timestep(self):
186
+ return self.__timestep
187
+
188
+ def set_timestep(self, t):
189
+ self.__timestep = t
190
+
191
+ def reset(self):
192
+ self.__state = AttnState.STORE
193
+ self.__timestep = 0
194
+
195
+ def to_load(self):
196
+ self.__state = AttnState.LOAD
197
+
198
+ def to_load_and_store_prev(self):
199
+ self.__state = AttnState.LOAD_AND_STORE_PREV
200
+
201
+
202
+ class CrossFrameAttnProcessor(AttnProcessor):
203
+ """
204
+ Cross-frame attention processor. Each frame attends to the first frame and the previous frame.
205
+
206
+ Args:
207
+ attn_state: Shared `AttnState` object indicating whether the model is processing the first frame or an intermediate frame
208
+ """
209
+
210
+ def __init__(self, attn_state: AttnState):
211
+ super().__init__()
212
+ self.attn_state = attn_state
213
+ self.first_maps = {}
214
+ self.prev_maps = {}
215
+
216
+ def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):
217
+ # Is self attention
218
+ if encoder_hidden_states is None:
219
+ t = self.attn_state.timestep
220
+ if self.attn_state.state == AttnState.STORE:
221
+ self.first_maps[t] = hidden_states.detach()
222
+ self.prev_maps[t] = hidden_states.detach()
223
+ res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
224
+ else:
225
+ if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
226
+ tmp = hidden_states.detach()
227
+ cross_map = torch.cat((self.first_maps[t], self.prev_maps[t]), dim=1)
228
+ res = super().__call__(attn, hidden_states, cross_map, attention_mask, temb)
229
+ if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
230
+ self.prev_maps[t] = tmp
231
+ else:
232
+ res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
233
+
234
+ return res
235
+
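The processor only has an effect once it is installed on the U-Net's attention layers and driven by a shared `AttnState`; the pipeline constructor below does this for the up-blocks. A standalone sketch of the same wiring (assumes a loaded `UNet2DConditionModel` instance named `unet`):

from diffusers.models.attention_processor import AttnProcessor

attn_state = AttnState()   # shared state: STORE on the first frame, LOAD on later frames
processors = {}
for name in unet.attn_processors.keys():
    # cross-frame attention only in the decoder (up) blocks, mirroring the constructor below
    processors[name] = CrossFrameAttnProcessor(attn_state) if name.startswith("up") else AttnProcessor()
unet.set_attn_processor(processors)

attn_state.reset()                    # before frame 0: store first-frame self-attention features
attn_state.to_load_and_store_prev()   # before later frames: attend to first + previous frame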
236
+
237
+ def prepare_image(image):
238
+ if isinstance(image, torch.Tensor):
239
+ # Batch single image
240
+ if image.ndim == 3:
241
+ image = image.unsqueeze(0)
242
+
243
+ image = image.to(dtype=torch.float32)
244
+ else:
245
+ # preprocess image
246
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
247
+ image = [image]
248
+
249
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
250
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
251
+ image = np.concatenate(image, axis=0)
252
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
253
+ image = np.concatenate([i[None, :] for i in image], axis=0)
254
+
255
+ image = image.transpose(0, 3, 1, 2)
256
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
257
+
258
+ return image
259
+
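A small sketch of the normalization performed above: a PIL image becomes a float batch scaled to [-1, 1].

import numpy as np
import PIL.Image

pil = PIL.Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
tensor = prepare_image(pil)
assert tensor.shape == (1, 3, 64, 64)
assert float(tensor.min()) == -1.0   # black pixels map to -1.0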
260
+
261
+ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
262
+ r"""
263
+ Pipeline for video-to-video translation using Stable Diffusion with the Rerender-A-Video algorithm.
264
+
265
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
266
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
267
+
268
+ In addition the pipeline inherits the following loading methods:
269
+ - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
270
+
271
+ Args:
272
+ vae ([`AutoencoderKL`]):
273
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
274
+ text_encoder ([`CLIPTextModel`]):
275
+ Frozen text-encoder. Stable Diffusion uses the text portion of
276
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
277
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
278
+ tokenizer (`CLIPTokenizer`):
279
+ Tokenizer of class
280
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
281
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
282
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
283
+ Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
284
+ as a list, the outputs from each ControlNet are added together to create one combined additional
285
+ conditioning.
286
+ scheduler ([`SchedulerMixin`]):
287
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
288
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
289
+ safety_checker ([`StableDiffusionSafetyChecker`]):
290
+ Classification module that estimates whether generated images could be considered offensive or harmful.
291
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
292
+ feature_extractor ([`CLIPImageProcessor`]):
293
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
294
+ """
295
+
296
+ _optional_components = ["safety_checker", "feature_extractor"]
297
+
298
+ def __init__(
299
+ self,
300
+ vae: AutoencoderKL,
301
+ text_encoder: CLIPTextModel,
302
+ tokenizer: CLIPTokenizer,
303
+ unet: UNet2DConditionModel,
304
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
305
+ scheduler: KarrasDiffusionSchedulers,
306
+ safety_checker: StableDiffusionSafetyChecker,
307
+ feature_extractor: CLIPImageProcessor,
308
+ image_encoder=None,
309
+ requires_safety_checker: bool = True,
310
+ ):
311
+ super().__init__(
312
+ vae,
313
+ text_encoder,
314
+ tokenizer,
315
+ unet,
316
+ controlnet,
317
+ scheduler,
318
+ safety_checker,
319
+ feature_extractor,
320
+ image_encoder,
321
+ requires_safety_checker,
322
+ )
323
+
324
+ if safety_checker is None and requires_safety_checker:
325
+ logger.warning(
326
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
327
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
328
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
329
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
330
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
331
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
332
+ )
333
+
334
+ if safety_checker is not None and feature_extractor is None:
335
+ raise ValueError(
336
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
337
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
338
+ )
339
+
340
+ if isinstance(controlnet, (list, tuple)):
341
+ controlnet = MultiControlNetModel(controlnet)
342
+
343
+ self.register_modules(
344
+ vae=vae,
345
+ text_encoder=text_encoder,
346
+ tokenizer=tokenizer,
347
+ unet=unet,
348
+ controlnet=controlnet,
349
+ scheduler=scheduler,
350
+ safety_checker=safety_checker,
351
+ feature_extractor=feature_extractor,
352
+ )
353
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
354
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
355
+ self.control_image_processor = VaeImageProcessor(
356
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
357
+ )
358
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
359
+ self.attn_state = AttnState()
360
+ attn_processor_dict = {}
361
+ for k in unet.attn_processors.keys():
362
+ if k.startswith("up"):
363
+ attn_processor_dict[k] = CrossFrameAttnProcessor(self.attn_state)
364
+ else:
365
+ attn_processor_dict[k] = AttnProcessor()
366
+
367
+ self.unet.set_attn_processor(attn_processor_dict)
368
+
369
+ flow_model = GMFlow(
370
+ feature_channels=128,
371
+ num_scales=1,
372
+ upsample_factor=8,
373
+ num_head=1,
374
+ attention_type="swin",
375
+ ffn_dim_expansion=4,
376
+ num_transformer_layers=6,
377
+ ).to("cuda")
378
+
379
+ checkpoint = torch.utils.model_zoo.load_url(
380
+ "https://huggingface.co/Anonymous-sub/Rerender/resolve/main/models/gmflow_sintel-0c07dcb3.pth",
381
+ map_location=lambda storage, loc: storage,
382
+ )
383
+ weights = checkpoint["model"] if "model" in checkpoint else checkpoint
384
+ flow_model.load_state_dict(weights, strict=False)
385
+ flow_model.eval()
386
+ self.flow_model = flow_model
387
+
388
+ # Modified from src/diffusers/pipelines/controlnet/pipeline_controlnet.StableDiffusionControlNetImg2ImgPipeline.check_inputs
389
+ def check_inputs(
390
+ self,
391
+ prompt,
392
+ callback_steps,
393
+ negative_prompt=None,
394
+ prompt_embeds=None,
395
+ negative_prompt_embeds=None,
396
+ controlnet_conditioning_scale=1.0,
397
+ control_guidance_start=0.0,
398
+ control_guidance_end=1.0,
399
+ ):
400
+ if (callback_steps is None) or (
401
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
402
+ ):
403
+ raise ValueError(
404
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
405
+ f" {type(callback_steps)}."
406
+ )
407
+
408
+ if prompt is not None and prompt_embeds is not None:
409
+ raise ValueError(
410
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
411
+ " only forward one of the two."
412
+ )
413
+ elif prompt is None and prompt_embeds is None:
414
+ raise ValueError(
415
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
416
+ )
417
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
418
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
419
+
420
+ if negative_prompt is not None and negative_prompt_embeds is not None:
421
+ raise ValueError(
422
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
423
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
424
+ )
425
+
426
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
427
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
428
+ raise ValueError(
429
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
430
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
431
+ f" {negative_prompt_embeds.shape}."
432
+ )
433
+
434
+ # `prompt` needs more sophisticated handling when there are multiple
435
+ # conditionings.
436
+ if isinstance(self.controlnet, MultiControlNetModel):
437
+ if isinstance(prompt, list):
438
+ logger.warning(
439
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
440
+ " prompts. The conditionings will be fixed across the prompts."
441
+ )
442
+
443
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
444
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
445
+ )
446
+
447
+ # Check `controlnet_conditioning_scale`
448
+ if (
449
+ isinstance(self.controlnet, ControlNetModel)
450
+ or is_compiled
451
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
452
+ ):
453
+ if not isinstance(controlnet_conditioning_scale, float):
454
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
455
+ elif (
456
+ isinstance(self.controlnet, MultiControlNetModel)
457
+ or is_compiled
458
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
459
+ ):
460
+ if isinstance(controlnet_conditioning_scale, list):
461
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
462
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
463
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
464
+ self.controlnet.nets
465
+ ):
466
+ raise ValueError(
467
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
468
+ " the same length as the number of controlnets"
469
+ )
470
+ else:
471
+ assert False
472
+
473
+ if len(control_guidance_start) != len(control_guidance_end):
474
+ raise ValueError(
475
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
476
+ )
477
+
478
+ if isinstance(self.controlnet, MultiControlNetModel):
479
+ if len(control_guidance_start) != len(self.controlnet.nets):
480
+ raise ValueError(
481
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
482
+ )
483
+
484
+ for start, end in zip(control_guidance_start, control_guidance_end):
485
+ if start >= end:
486
+ raise ValueError(
487
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
488
+ )
489
+ if start < 0.0:
490
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
491
+ if end > 1.0:
492
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
493
+
494
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
495
+ def prepare_control_image(
496
+ self,
497
+ image,
498
+ width,
499
+ height,
500
+ batch_size,
501
+ num_images_per_prompt,
502
+ device,
503
+ dtype,
504
+ do_classifier_free_guidance=False,
505
+ guess_mode=False,
506
+ ):
507
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
508
+ image_batch_size = image.shape[0]
509
+
510
+ if image_batch_size == 1:
511
+ repeat_by = batch_size
512
+ else:
513
+ # image batch size is the same as prompt batch size
514
+ repeat_by = num_images_per_prompt
515
+
516
+ image = image.repeat_interleave(repeat_by, dim=0)
517
+
518
+ image = image.to(device=device, dtype=dtype)
519
+
520
+ if do_classifier_free_guidance and not guess_mode:
521
+ image = torch.cat([image] * 2)
522
+
523
+ return image
524
+
525
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
526
+ def get_timesteps(self, num_inference_steps, strength, device):
527
+ # get the original timestep using init_timestep
528
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
529
+
530
+ t_start = max(num_inference_steps - init_timestep, 0)
531
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
532
+
533
+ return timesteps, num_inference_steps - t_start
534
+
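For intuition on the SDEdit-style truncation above: with `num_inference_steps=50` and `strength=0.8`, `init_timestep = 40` and `t_start = 10`, so only the last 40 scheduler timesteps are run. A small sketch (assumes a configured pipeline instance `pipe` with a first-order scheduler such as DDIM):

pipe.scheduler.set_timesteps(50, device="cpu")
timesteps, n = pipe.get_timesteps(num_inference_steps=50, strength=0.8, device="cpu")
assert n == 40 and len(timesteps) == 40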
535
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents
536
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
537
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
538
+ raise ValueError(
539
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
540
+ )
541
+
542
+ image = image.to(device=device, dtype=dtype)
543
+
544
+ batch_size = batch_size * num_images_per_prompt
545
+
546
+ if image.shape[1] == 4:
547
+ init_latents = image
548
+
549
+ else:
550
+ if isinstance(generator, list) and len(generator) != batch_size:
551
+ raise ValueError(
552
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
553
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
554
+ )
555
+
556
+ elif isinstance(generator, list):
557
+ init_latents = [
558
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
559
+ ]
560
+ init_latents = torch.cat(init_latents, dim=0)
561
+ else:
562
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
563
+
564
+ init_latents = self.vae.config.scaling_factor * init_latents
565
+
566
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
567
+ # expand init_latents for batch_size
568
+ deprecation_message = (
569
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
570
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
571
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
572
+ " your script to pass as many initial images as text prompts to suppress this warning."
573
+ )
574
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
575
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
576
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
577
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
578
+ raise ValueError(
579
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
580
+ )
581
+ else:
582
+ init_latents = torch.cat([init_latents], dim=0)
583
+
584
+ shape = init_latents.shape
585
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
586
+
587
+ # get latents
588
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
589
+ latents = init_latents
590
+
591
+ return latents
592
+
593
+ @torch.no_grad()
594
+ def __call__(
595
+ self,
596
+ prompt: Union[str, List[str]] = None,
597
+ frames: Union[List[np.ndarray], torch.FloatTensor] = None,
598
+ control_frames: Union[List[np.ndarray], torch.FloatTensor] = None,
599
+ strength: float = 0.8,
600
+ num_inference_steps: int = 50,
601
+ guidance_scale: float = 7.5,
602
+ negative_prompt: Optional[Union[str, List[str]]] = None,
603
+ eta: float = 0.0,
604
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
605
+ latents: Optional[torch.FloatTensor] = None,
606
+ prompt_embeds: Optional[torch.FloatTensor] = None,
607
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
608
+ output_type: Optional[str] = "pil",
609
+ return_dict: bool = True,
610
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
611
+ callback_steps: int = 1,
612
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
613
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
614
+ guess_mode: bool = False,
615
+ control_guidance_start: Union[float, List[float]] = 0.0,
616
+ control_guidance_end: Union[float, List[float]] = 1.0,
617
+ warp_start: Union[float, List[float]] = 0.0,
618
+ warp_end: Union[float, List[float]] = 0.3,
619
+ mask_start: Union[float, List[float]] = 0.5,
620
+ mask_end: Union[float, List[float]] = 0.8,
621
+ smooth_boundary: bool = True,
622
+ mask_strength: Union[float, List[float]] = 0.5,
623
+ inner_strength: Union[float, List[float]] = 0.9,
624
+ ):
625
+ r"""
626
+ Function invoked when calling the pipeline for generation.
627
+
628
+ Args:
629
+ prompt (`str` or `List[str]`, *optional*):
630
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
631
+ instead.
632
+ frames (`List[np.ndarray]` or `torch.FloatTensor`): The input images to be used as the starting point for the image generation process.
633
+ control_frames (`List[np.ndarray]` or `torch.FloatTensor`): The ControlNet conditioning images that provide guidance to the `unet` during generation.
634
+ strength (`float`): SDEdit strength.
635
+ num_inference_steps (`int`, *optional*, defaults to 50):
636
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
637
+ expense of slower inference.
638
+ guidance_scale (`float`, *optional*, defaults to 7.5):
639
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
640
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
641
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
642
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
643
+ usually at the expense of lower image quality.
644
+ negative_prompt (`str` or `List[str]`, *optional*):
645
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
646
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
647
+ less than `1`).
648
+ eta (`float`, *optional*, defaults to 0.0):
649
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
650
+ [`schedulers.DDIMScheduler`], will be ignored for others.
651
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
652
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
653
+ to make generation deterministic.
654
+ latents (`torch.FloatTensor`, *optional*):
655
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
656
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
657
+ tensor will be generated by sampling using the supplied random `generator`.
658
+ prompt_embeds (`torch.FloatTensor`, *optional*):
659
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
660
+ provided, text embeddings will be generated from `prompt` input argument.
661
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
662
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
663
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
664
+ argument.
665
+ output_type (`str`, *optional*, defaults to `"pil"`):
666
+ The output format of the generated image. Choose between
667
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
668
+ return_dict (`bool`, *optional*, defaults to `True`):
669
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
670
+ plain tuple.
671
+ callback (`Callable`, *optional*):
672
+ A function that will be called every `callback_steps` steps during inference. The function will be
673
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
674
+ callback_steps (`int`, *optional*, defaults to 1):
675
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
676
+ called at every step.
677
+ cross_attention_kwargs (`dict`, *optional*):
678
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
679
+ `self.processor` in
680
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
681
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
682
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
683
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
684
+ corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
685
+ than for [`~StableDiffusionControlNetPipeline.__call__`].
686
+ guess_mode (`bool`, *optional*, defaults to `False`):
687
+ In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
688
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
689
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
690
+ The percentage of total steps at which the controlnet starts applying.
691
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
692
+ The percentage of total steps at which the controlnet stops applying.
693
+ warp_start (`float`): Shape-aware fusion start timestep.
694
+ warp_end (`float`): Shape-aware fusion end timestep.
695
+ mask_start (`float`): Pixel-aware fusion start timestep.
696
+ mask_end (`float`): Pixel-aware fusion end timestep.
697
+ smooth_boundary (`bool`): Smooth fusion boundary. Set `True` to prevent artifacts at boundary.
698
+ mask_strength (`float`): Pixel-aware fusion strength.
699
+ inner_strength (`float`): Pixel-aware fusion detail level.
700
+
701
+ Examples:
702
+
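A hypothetical invocation sketch; the checkpoint names, the GMFlow checkout referenced by `gmflow_dir` at the top of this file, and the `frames`/`control_frames` preparation are placeholders rather than values prescribed by the source:

import torch
from diffusers import ControlNetModel, DiffusionPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    custom_pipeline="rerender_a_video",
    torch_dtype=torch.float16,
).to("cuda")

# frames: list of RGB video frames; control_frames: matching HED/canny maps prepared by the user
output = pipe(
    prompt="a stylized portrait, oil painting",
    frames=frames,
    control_frames=control_frames,
    num_inference_steps=20,
    strength=0.75,
    controlnet_conditioning_scale=0.7,
    generator=torch.manual_seed(0),
)
# output.frames is expected to hold the generated frames (see TextToVideoSDPipelineOutput above)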
703
+ Returns:
704
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
705
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
706
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
707
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
708
+ (nsfw) content, according to the `safety_checker`.
709
+ """
710
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
711
+
712
+ # align format for control guidance
713
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
714
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
715
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
716
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
717
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
718
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
719
+ control_guidance_start, control_guidance_end = (
720
+ mult * [control_guidance_start],
721
+ mult * [control_guidance_end],
722
+ )
723
+
724
+ # 1. Check inputs. Raise error if not correct
725
+ self.check_inputs(
726
+ prompt,
727
+ callback_steps,
728
+ negative_prompt,
729
+ prompt_embeds,
730
+ negative_prompt_embeds,
731
+ controlnet_conditioning_scale,
732
+ control_guidance_start,
733
+ control_guidance_end,
734
+ )
735
+
736
+ # 2. Define call parameters
737
+ # Currently we only support 1 prompt
738
+ if prompt is not None and isinstance(prompt, str):
739
+ batch_size = 1
740
+ elif prompt is not None and isinstance(prompt, list):
741
+ assert False
742
+ else:
743
+ assert False
744
+ num_images_per_prompt = 1
745
+
746
+ device = self._execution_device
747
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
748
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
749
+ # corresponds to doing no classifier free guidance.
750
+ do_classifier_free_guidance = guidance_scale > 1.0
751
+
752
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
753
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
754
+
755
+ global_pool_conditions = (
756
+ controlnet.config.global_pool_conditions
757
+ if isinstance(controlnet, ControlNetModel)
758
+ else controlnet.nets[0].config.global_pool_conditions
759
+ )
760
+ guess_mode = guess_mode or global_pool_conditions
761
+
762
+ # 3. Encode input prompt
763
+ text_encoder_lora_scale = (
764
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
765
+ )
766
+ prompt_embeds = self._encode_prompt(
767
+ prompt,
768
+ device,
769
+ num_images_per_prompt,
770
+ do_classifier_free_guidance,
771
+ negative_prompt,
772
+ prompt_embeds=prompt_embeds,
773
+ negative_prompt_embeds=negative_prompt_embeds,
774
+ lora_scale=text_encoder_lora_scale,
775
+ )
776
+
777
+ # 4. Process the first frame
778
+ height, width = None, None
779
+ output_frames = []
780
+ self.attn_state.reset()
781
+
782
+ # 4.1 prepare frames
783
+ image = self.image_processor.preprocess(frames[0]).to(dtype=torch.float32)
784
+ first_image = image[0] # C, H, W
785
+
786
+ # 4.2 Prepare controlnet_conditioning_image
787
+ # Currently we only support single control
788
+ if isinstance(controlnet, ControlNetModel):
789
+ control_image = self.prepare_control_image(
790
+ image=control_frames[0],
791
+ width=width,
792
+ height=height,
793
+ batch_size=batch_size,
794
+ num_images_per_prompt=1,
795
+ device=device,
796
+ dtype=controlnet.dtype,
797
+ do_classifier_free_guidance=do_classifier_free_guidance,
798
+ guess_mode=guess_mode,
799
+ )
800
+ else:
801
+ assert False
802
+
803
+ # 4.3 Prepare timesteps
804
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
805
+ timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
806
+ latent_timestep = timesteps[:1].repeat(batch_size)
807
+
808
+ # 4.4 Prepare latent variables
809
+ latents = self.prepare_latents(
810
+ image,
811
+ latent_timestep,
812
+ batch_size,
813
+ num_images_per_prompt,
814
+ prompt_embeds.dtype,
815
+ device,
816
+ generator,
817
+ )
818
+
819
+ # 4.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
820
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
821
+
822
+ # 4.6 Create tensor stating which controlnets to keep
823
+ controlnet_keep = []
824
+ for i in range(len(timesteps)):
825
+ keeps = [
826
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
827
+ for s, e in zip(control_guidance_start, control_guidance_end)
828
+ ]
829
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
830
+
831
+ first_x0_list = []
832
+
833
+ # 4.7 Denoising loop
834
+ num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
835
+ with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
836
+ for i, t in enumerate(timesteps):
837
+ self.attn_state.set_timestep(t.item())
838
+
839
+ # expand the latents if we are doing classifier free guidance
840
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
841
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
842
+
843
+ # controlnet(s) inference
844
+ if guess_mode and do_classifier_free_guidance:
845
+ # Infer ControlNet only for the conditional batch.
846
+ control_model_input = latents
847
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
848
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
849
+ else:
850
+ control_model_input = latent_model_input
851
+ controlnet_prompt_embeds = prompt_embeds
852
+
853
+ if isinstance(controlnet_keep[i], list):
854
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
855
+ else:
856
+ controlnet_cond_scale = controlnet_conditioning_scale
857
+ if isinstance(controlnet_cond_scale, list):
858
+ controlnet_cond_scale = controlnet_cond_scale[0]
859
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
860
+
861
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
862
+ control_model_input,
863
+ t,
864
+ encoder_hidden_states=controlnet_prompt_embeds,
865
+ controlnet_cond=control_image,
866
+ conditioning_scale=cond_scale,
867
+ guess_mode=guess_mode,
868
+ return_dict=False,
869
+ )
870
+
871
+ if guess_mode and do_classifier_free_guidance:
872
+ # Inferred ControlNet only for the conditional batch.
873
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
874
+ # add 0 to the unconditional batch to keep it unchanged.
875
+ down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
876
+ mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
877
+
878
+ # predict the noise residual
879
+ noise_pred = self.unet(
880
+ latent_model_input,
881
+ t,
882
+ encoder_hidden_states=prompt_embeds,
883
+ cross_attention_kwargs=cross_attention_kwargs,
884
+ down_block_additional_residuals=down_block_res_samples,
885
+ mid_block_additional_residual=mid_block_res_sample,
886
+ return_dict=False,
887
+ )[0]
888
+
889
+ # perform guidance
890
+ if do_classifier_free_guidance:
891
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
892
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
893
+
894
+ alpha_prod_t = self.scheduler.alphas_cumprod[t]
895
+ beta_prod_t = 1 - alpha_prod_t
896
+ pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
897
+ first_x0 = pred_x0.detach()
898
+ first_x0_list.append(first_x0)
899
+
900
+ # compute the previous noisy sample x_t -> x_t-1
901
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
902
+
903
+ # call the callback, if provided
904
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
905
+ progress_bar.update()
906
+ if callback is not None and i % callback_steps == 0:
907
+ callback(i, t, latents)
908
+
909
+ if not output_type == "latent":
910
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
911
+ else:
912
+ image = latents
913
+
914
+ first_result = image
915
+ prev_result = image
916
+ do_denormalize = [True] * image.shape[0]
917
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
918
+
919
+ output_frames.append(image[0])
920
+
921
+ # 5. Process each frame
922
+ for idx in range(1, len(frames)):
923
+ image = frames[idx]
924
+ prev_image = frames[idx - 1]
925
+ control_image = control_frames[idx]
926
+ # 5.1 prepare frames
927
+ image = self.image_processor.preprocess(image).to(dtype=torch.float32)
928
+ prev_image = self.image_processor.preprocess(prev_image).to(dtype=torch.float32)
929
+
930
+ warped_0, bwd_occ_0, bwd_flow_0 = get_warped_and_mask(
931
+ self.flow_model, first_image, image[0], first_result, False
932
+ )
933
+ blend_mask_0 = blur(F.max_pool2d(bwd_occ_0, kernel_size=9, stride=1, padding=4))
934
+ blend_mask_0 = torch.clamp(blend_mask_0 + bwd_occ_0, 0, 1)
935
+
936
+ warped_pre, bwd_occ_pre, bwd_flow_pre = get_warped_and_mask(
937
+ self.flow_model, prev_image[0], image[0], prev_result, False
938
+ )
939
+ blend_mask_pre = blur(F.max_pool2d(bwd_occ_pre, kernel_size=9, stride=1, padding=4))
940
+ blend_mask_pre = torch.clamp(blend_mask_pre + bwd_occ_pre, 0, 1)
941
+
942
+ warp_mask = 1 - F.max_pool2d(blend_mask_0, kernel_size=8)
943
+ warp_flow = F.interpolate(bwd_flow_0 / 8.0, scale_factor=1.0 / 8, mode="bilinear")
944
+
945
+ # 5.2 Prepare controlnet_conditioning_image
946
+ # Currently we only support single control
947
+ if isinstance(controlnet, ControlNetModel):
948
+ control_image = self.prepare_control_image(
949
+ image=control_image,
950
+ width=width,
951
+ height=height,
952
+ batch_size=batch_size,
953
+ num_images_per_prompt=1,
954
+ device=device,
955
+ dtype=controlnet.dtype,
956
+ do_classifier_free_guidance=do_classifier_free_guidance,
957
+ guess_mode=guess_mode,
958
+ )
959
+ else:
960
+ assert False
961
+
962
+ # 5.3 Prepare timesteps
963
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
964
+ timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
965
+ latent_timestep = timesteps[:1].repeat(batch_size)
966
+
967
+ skip_t = int(num_inference_steps * (1 - strength))
968
+ warp_start_t = int(warp_start * num_inference_steps)
969
+ warp_end_t = int(warp_end * num_inference_steps)
970
+ mask_start_t = int(mask_start * num_inference_steps)
971
+ mask_end_t = int(mask_end * num_inference_steps)
972
+
973
+ # 5.4 Prepare latent variables
974
+ init_latents = self.prepare_latents(
975
+ image,
976
+ latent_timestep,
977
+ batch_size,
978
+ num_images_per_prompt,
979
+ prompt_embeds.dtype,
980
+ device,
981
+ generator,
982
+ )
983
+
984
+ # 5.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
985
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
986
+
987
+ # 5.6 Create tensor stating which controlnets to keep
988
+ controlnet_keep = []
989
+ for i in range(len(timesteps)):
990
+ keeps = [
991
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
992
+ for s, e in zip(control_guidance_start, control_guidance_end)
993
+ ]
994
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
995
+
996
+ # 5.7 Denoising loop
997
+ num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
998
+
999
+ def denoising_loop(latents, mask=None, xtrg=None, noise_rescale=None):
1000
+ dir_xt = 0
1001
+ latents_dtype = latents.dtype
1002
+ with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
1003
+ for i, t in enumerate(timesteps):
1004
+ self.attn_state.set_timestep(t.item())
1005
+ if i + skip_t >= mask_start_t and i + skip_t <= mask_end_t and xtrg is not None:
1006
+ rescale = torch.maximum(1.0 - mask, (1 - mask**2) ** 0.5 * inner_strength)
1007
+ if noise_rescale is not None:
1008
+ rescale = (1.0 - mask) * (1 - noise_rescale) + rescale * noise_rescale
1009
+ noise = randn_tensor(xtrg.shape, generator=generator, device=device, dtype=xtrg.dtype)
1010
+ latents_ref = self.scheduler.add_noise(xtrg, noise, t)
1011
+ latents = latents_ref * mask + (1.0 - mask) * (latents - dir_xt) + rescale * dir_xt
1012
+ latents = latents.to(latents_dtype)
1013
+
1014
+ # expand the latents if we are doing classifier free guidance
1015
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1016
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1017
+
1018
+ # controlnet(s) inference
1019
+ if guess_mode and do_classifier_free_guidance:
1020
+ # Infer ControlNet only for the conditional batch.
1021
+ control_model_input = latents
1022
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1023
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1024
+ else:
1025
+ control_model_input = latent_model_input
1026
+ controlnet_prompt_embeds = prompt_embeds
1027
+
1028
+ if isinstance(controlnet_keep[i], list):
1029
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1030
+ else:
1031
+ controlnet_cond_scale = controlnet_conditioning_scale
1032
+ if isinstance(controlnet_cond_scale, list):
1033
+ controlnet_cond_scale = controlnet_cond_scale[0]
1034
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1035
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1036
+ control_model_input,
1037
+ t,
1038
+ encoder_hidden_states=controlnet_prompt_embeds,
1039
+ controlnet_cond=control_image,
1040
+ conditioning_scale=cond_scale,
1041
+ guess_mode=guess_mode,
1042
+ return_dict=False,
1043
+ )
1044
+
1045
+ if guess_mode and do_classifier_free_guidance:
1046
+ # ControlNet was inferred only for the conditional batch.
1047
+ # To apply the output of ControlNet to both the unconditional and conditional batches,
1048
+ # add 0 to the unconditional batch to keep it unchanged.
1049
+ down_block_res_samples = [
1050
+ torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples
1051
+ ]
1052
+ mid_block_res_sample = torch.cat(
1053
+ [torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
1054
+ )
1055
+
1056
+ # predict the noise residual
1057
+ noise_pred = self.unet(
1058
+ latent_model_input,
1059
+ t,
1060
+ encoder_hidden_states=prompt_embeds,
1061
+ cross_attention_kwargs=cross_attention_kwargs,
1062
+ down_block_additional_residuals=down_block_res_samples,
1063
+ mid_block_additional_residual=mid_block_res_sample,
1064
+ return_dict=False,
1065
+ )[0]
1066
+
1067
+ # perform guidance
1068
+ if do_classifier_free_guidance:
1069
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1070
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1071
+
1072
+ # Get pred_x0 from scheduler
1073
+ alpha_prod_t = self.scheduler.alphas_cumprod[t]
1074
+ beta_prod_t = 1 - alpha_prod_t
1075
+ pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
1076
+
1077
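+ # Inside the warp window, blend the first frame's x0 prediction, warped along the optical flow,
+ # into the current x0 prediction according to warp_mask so the denoising trajectory stays
+ # consistent across frames.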
+ if i + skip_t >= warp_start_t and i + skip_t <= warp_end_t:
1078
+ # warp x_0
1079
+ pred_x0 = (
1080
+ flow_warp(first_x0_list[i], warp_flow, mode="nearest") * warp_mask
1081
+ + (1 - warp_mask) * pred_x0
1082
+ )
1083
+
1084
+ # get x_t from x_0
1085
+ latents = self.scheduler.add_noise(pred_x0, noise_pred, t).to(latents_dtype)
1086
+
1087
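+ # dir_xt is the DDIM "direction pointing to x_t" term; it is reused in the next iteration when
+ # blending the latents against the reference above.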
+ prev_t = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
1088
+ if i == len(timesteps) - 1:
1089
+ alpha_t_prev = 1.0
1090
+ else:
1091
+ alpha_t_prev = self.scheduler.alphas_cumprod[prev_t]
1092
+
1093
+ dir_xt = (1.0 - alpha_t_prev) ** 0.5 * noise_pred
1094
+
1095
+ # compute the previous noisy sample x_t -> x_t-1
1096
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[
1097
+ 0
1098
+ ]
1099
+
1100
+ # call the callback, if provided
1101
+ if i == len(timesteps) - 1 or (
1102
+ (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
1103
+ ):
1104
+ progress_bar.update()
1105
+ if callback is not None and i % callback_steps == 0:
1106
+ callback(i, t, latents)
1107
+
1108
+ return latents
1109
+
1110
+ if mask_start_t <= mask_end_t:
1111
+ self.attn_state.to_load()
1112
+ else:
1113
+ self.attn_state.to_load_and_store_prev()
1114
+ latents = denoising_loop(init_latents)
1115
+
1116
+ if mask_start_t <= mask_end_t:
1117
+ direct_result = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1118
+
1119
+ blend_results = (1 - blend_mask_pre) * warped_pre + blend_mask_pre * direct_result
1120
+ blend_results = (1 - blend_mask_0) * warped_0 + blend_mask_0 * blend_results
1121
+
1122
+ bwd_occ = 1 - torch.clamp(1 - bwd_occ_pre + 1 - bwd_occ_0, 0, 1)
1123
+ blend_mask = blur(F.max_pool2d(bwd_occ, kernel_size=9, stride=1, padding=4))
1124
+ blend_mask = 1 - torch.clamp(blend_mask + bwd_occ, 0, 1)
1125
+
1126
+ blend_results = blend_results.to(latents.dtype)
1127
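+ # Encode the blended frame and compensate the VAE round-trip error: the residual between the first
+ # encoding (xtrg) and a re-encoding of its decode (xtrg_rec) is added back, except where the
+ # corrected decode drifts too far from the blended image (tracked by mask_x below).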
+ xtrg = self.vae.encode(blend_results).latent_dist.sample(generator)
1128
+ xtrg = self.vae.config.scaling_factor * xtrg
1129
+ blend_results_rec = self.vae.decode(xtrg / self.vae.config.scaling_factor, return_dict=False)[0]
1130
+ xtrg_rec = self.vae.encode(blend_results_rec).latent_dist.sample(generator)
1131
+ xtrg_rec = self.vae.config.scaling_factor * xtrg_rec
1132
+ xtrg_ = xtrg + (xtrg - xtrg_rec)
1133
+ blend_results_rec_new = self.vae.decode(xtrg_ / self.vae.config.scaling_factor, return_dict=False)[0]
1134
+ tmp = (abs(blend_results_rec_new - blend_results).mean(dim=1, keepdims=True) > 0.25).float()
1135
+
1136
+ mask_x = F.max_pool2d(
1137
+ (F.interpolate(tmp, scale_factor=1 / 8.0, mode="bilinear") > 0).float(),
1138
+ kernel_size=3,
1139
+ stride=1,
1140
+ padding=1,
1141
+ )
1142
+
1143
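+ # Downsample the pixel-space blend mask to latent resolution; 1 - max_pool(1 - x) acts as a min-pool,
+ # so a latent cell stays masked only if every pixel it covers is masked.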
+ mask = 1 - F.max_pool2d(1 - blend_mask, kernel_size=8) # * (1-mask_x)
1144
+
1145
+ if smooth_boundary:
1146
+ noise_rescale = find_flat_region(mask)
1147
+ else:
1148
+ noise_rescale = torch.ones_like(mask)
1149
+
1150
+ xtrg = (xtrg + (1 - mask_x) * (xtrg - xtrg_rec)) * mask
1151
+ xtrg = xtrg.to(latents.dtype)
1152
+
1153
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1154
+ timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
1155
+
1156
+ self.attn_state.to_load_and_store_prev()
1157
+ latents = denoising_loop(init_latents, mask * mask_strength, xtrg, noise_rescale)
1158
+
1159
+ if not output_type == "latent":
1160
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
1161
+ else:
1162
+ image = latents
1163
+
1164
+ prev_result = image
1165
+
1166
+ do_denormalize = [True] * image.shape[0]
1167
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1168
+
1169
+ output_frames.append(image[0])
1170
+
1171
+ # Offload last model to CPU
1172
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1173
+ self.final_offload_hook.offload()
1174
+
1175
+ if not return_dict:
1176
+ return output_frames
1177
+
1178
+ return TextToVideoSDPipelineOutput(frames=output_frames)
v0.26.3/run_onnx_controlnet.py ADDED
@@ -0,0 +1,911 @@
+ import argparse
2
+ import inspect
3
+ import os
4
+ import time
5
+ import warnings
6
+ from typing import Any, Callable, Dict, List, Optional, Union
7
+
8
+ import numpy as np
9
+ import PIL.Image
10
+ import torch
11
+ from PIL import Image
12
+ from transformers import CLIPTokenizer
13
+
14
+ from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
15
+ from diffusers.image_processor import VaeImageProcessor
16
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
17
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
18
+ from diffusers.schedulers import KarrasDiffusionSchedulers
19
+ from diffusers.utils import (
20
+ deprecate,
21
+ logging,
22
+ replace_example_docstring,
23
+ )
24
+ from diffusers.utils.torch_utils import randn_tensor
25
+
26
+
27
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
+
29
+
30
+ EXAMPLE_DOC_STRING = """
31
+ Examples:
32
+ ```py
33
+ >>> # !pip install opencv-python transformers accelerate
34
+ >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
35
+ >>> from diffusers.utils import load_image
36
+ >>> import numpy as np
37
+ >>> import torch
38
+
39
+ >>> import cv2
40
+ >>> from PIL import Image
41
+
42
+ >>> # download an image
43
+ >>> image = load_image(
44
+ ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
45
+ ... )
46
+ >>> np_image = np.array(image)
47
+
48
+ >>> # get canny image
49
+ >>> np_image = cv2.Canny(np_image, 100, 200)
50
+ >>> np_image = np_image[:, :, None]
51
+ >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
52
+ >>> canny_image = Image.fromarray(np_image)
53
+
54
+ >>> # load control net and stable diffusion v1-5
55
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
56
+ >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
57
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
58
+ ... )
59
+
60
+ >>> # speed up diffusion process with faster scheduler and memory optimization
61
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
62
+ >>> pipe.enable_model_cpu_offload()
63
+
64
+ >>> # generate image
65
+ >>> generator = torch.manual_seed(0)
66
+ >>> image = pipe(
67
+ ... "futuristic-looking woman",
68
+ ... num_inference_steps=20,
69
+ ... generator=generator,
70
+ ... image=image,
71
+ ... control_image=canny_image,
72
+ ... ).images[0]
73
+ ```
74
+ """
75
+
76
+
77
+ def prepare_image(image):
78
+ if isinstance(image, torch.Tensor):
79
+ # Batch single image
80
+ if image.ndim == 3:
81
+ image = image.unsqueeze(0)
82
+
83
+ image = image.to(dtype=torch.float32)
84
+ else:
85
+ # preprocess image
86
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
87
+ image = [image]
88
+
89
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
90
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
91
+ image = np.concatenate(image, axis=0)
92
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
93
+ image = np.concatenate([i[None, :] for i in image], axis=0)
94
+
95
+ image = image.transpose(0, 3, 1, 2)
96
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
97
+
98
+ return image
99
+
100
+
101
+ class OnnxStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
102
+ vae_encoder: OnnxRuntimeModel
103
+ vae_decoder: OnnxRuntimeModel
104
+ text_encoder: OnnxRuntimeModel
105
+ tokenizer: CLIPTokenizer
106
+ unet: OnnxRuntimeModel
107
+ scheduler: KarrasDiffusionSchedulers
108
+
109
+ def __init__(
110
+ self,
111
+ vae_encoder: OnnxRuntimeModel,
112
+ vae_decoder: OnnxRuntimeModel,
113
+ text_encoder: OnnxRuntimeModel,
114
+ tokenizer: CLIPTokenizer,
115
+ unet: OnnxRuntimeModel,
116
+ scheduler: KarrasDiffusionSchedulers,
117
+ ):
118
+ super().__init__()
119
+
120
+ self.register_modules(
121
+ vae_encoder=vae_encoder,
122
+ vae_decoder=vae_decoder,
123
+ text_encoder=text_encoder,
124
+ tokenizer=tokenizer,
125
+ unet=unet,
126
+ scheduler=scheduler,
127
+ )
128
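+ # The Stable Diffusion v1 VAE downsamples by a factor of 8 (three halvings), hence 2 ** (4 - 1).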
+ self.vae_scale_factor = 2 ** (4 - 1)
129
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
130
+ self.control_image_processor = VaeImageProcessor(
131
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
132
+ )
133
+
134
+ def _encode_prompt(
135
+ self,
136
+ prompt: Union[str, List[str]],
137
+ num_images_per_prompt: Optional[int],
138
+ do_classifier_free_guidance: bool,
139
+ negative_prompt: Optional[str],
140
+ prompt_embeds: Optional[np.ndarray] = None,
141
+ negative_prompt_embeds: Optional[np.ndarray] = None,
142
+ ):
143
+ r"""
144
+ Encodes the prompt into text encoder hidden states.
145
+
146
+ Args:
147
+ prompt (`str` or `List[str]`):
148
+ prompt to be encoded
149
+ num_images_per_prompt (`int`):
150
+ number of images that should be generated per prompt
151
+ do_classifier_free_guidance (`bool`):
152
+ whether to use classifier free guidance or not
153
+ negative_prompt (`str` or `List[str]`):
154
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
155
+ if `guidance_scale` is less than `1`).
156
+ prompt_embeds (`np.ndarray`, *optional*):
157
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
158
+ provided, text embeddings will be generated from `prompt` input argument.
159
+ negative_prompt_embeds (`np.ndarray`, *optional*):
160
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
161
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
162
+ argument.
163
+ """
164
+ if prompt is not None and isinstance(prompt, str):
165
+ batch_size = 1
166
+ elif prompt is not None and isinstance(prompt, list):
167
+ batch_size = len(prompt)
168
+ else:
169
+ batch_size = prompt_embeds.shape[0]
170
+
171
+ if prompt_embeds is None:
172
+ # get prompt text embeddings
173
+ text_inputs = self.tokenizer(
174
+ prompt,
175
+ padding="max_length",
176
+ max_length=self.tokenizer.model_max_length,
177
+ truncation=True,
178
+ return_tensors="np",
179
+ )
180
+ text_input_ids = text_inputs.input_ids
181
+ untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
182
+
183
+ if not np.array_equal(text_input_ids, untruncated_ids):
184
+ removed_text = self.tokenizer.batch_decode(
185
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
186
+ )
187
+ logger.warning(
188
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
189
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
190
+ )
191
+
192
+ prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
193
+
194
+ prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
195
+
196
+ # get unconditional embeddings for classifier free guidance
197
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
198
+ uncond_tokens: List[str]
199
+ if negative_prompt is None:
200
+ uncond_tokens = [""] * batch_size
201
+ elif type(prompt) is not type(negative_prompt):
202
+ raise TypeError(
203
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
204
+ f" {type(prompt)}."
205
+ )
206
+ elif isinstance(negative_prompt, str):
207
+ uncond_tokens = [negative_prompt] * batch_size
208
+ elif batch_size != len(negative_prompt):
209
+ raise ValueError(
210
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
211
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
212
+ " the batch size of `prompt`."
213
+ )
214
+ else:
215
+ uncond_tokens = negative_prompt
216
+
217
+ max_length = prompt_embeds.shape[1]
218
+ uncond_input = self.tokenizer(
219
+ uncond_tokens,
220
+ padding="max_length",
221
+ max_length=max_length,
222
+ truncation=True,
223
+ return_tensors="np",
224
+ )
225
+ negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
226
+
227
+ if do_classifier_free_guidance:
228
+ negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
229
+
230
+ # For classifier free guidance, we need to do two forward passes.
231
+ # Here we concatenate the unconditional and text embeddings into a single batch
232
+ # to avoid doing two forward passes
233
+ prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
234
+
235
+ return prompt_embeds
236
+
237
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
238
+ def decode_latents(self, latents):
239
+ warnings.warn(
240
+ "The decode_latents method is deprecated and will be removed in a future version. Please"
241
+ " use VaeImageProcessor instead",
242
+ FutureWarning,
243
+ )
244
+ latents = 1 / self.vae.config.scaling_factor * latents
245
+ image = self.vae.decode(latents, return_dict=False)[0]
246
+ image = (image / 2 + 0.5).clamp(0, 1)
247
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
248
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
249
+ return image
250
+
251
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
252
+ def prepare_extra_step_kwargs(self, generator, eta):
253
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
254
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
255
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
256
+ # and should be between [0, 1]
257
+
258
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
259
+ extra_step_kwargs = {}
260
+ if accepts_eta:
261
+ extra_step_kwargs["eta"] = eta
262
+
263
+ # check if the scheduler accepts generator
264
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
265
+ if accepts_generator:
266
+ extra_step_kwargs["generator"] = generator
267
+ return extra_step_kwargs
268
+
269
+ def check_inputs(
270
+ self,
271
+ num_controlnet,
272
+ prompt,
273
+ image,
274
+ callback_steps,
275
+ negative_prompt=None,
276
+ prompt_embeds=None,
277
+ negative_prompt_embeds=None,
278
+ controlnet_conditioning_scale=1.0,
279
+ control_guidance_start=0.0,
280
+ control_guidance_end=1.0,
281
+ ):
282
+ if (callback_steps is None) or (
283
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
284
+ ):
285
+ raise ValueError(
286
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
287
+ f" {type(callback_steps)}."
288
+ )
289
+
290
+ if prompt is not None and prompt_embeds is not None:
291
+ raise ValueError(
292
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
293
+ " only forward one of the two."
294
+ )
295
+ elif prompt is None and prompt_embeds is None:
296
+ raise ValueError(
297
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
298
+ )
299
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
300
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
301
+
302
+ if negative_prompt is not None and negative_prompt_embeds is not None:
303
+ raise ValueError(
304
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
305
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
306
+ )
307
+
308
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
309
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
310
+ raise ValueError(
311
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
312
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
313
+ f" {negative_prompt_embeds.shape}."
314
+ )
315
+
316
+ # Check `image`
317
+ if num_controlnet == 1:
318
+ self.check_image(image, prompt, prompt_embeds)
319
+ elif num_controlnet > 1:
320
+ if not isinstance(image, list):
321
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
322
+
323
+ # When `image` is a nested list:
324
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
325
+ elif any(isinstance(i, list) for i in image):
326
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
327
+ elif len(image) != num_controlnet:
328
+ raise ValueError(
329
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
330
+ )
331
+
332
+ for image_ in image:
333
+ self.check_image(image_, prompt, prompt_embeds)
334
+ else:
335
+ assert False
336
+
337
+ # Check `controlnet_conditioning_scale`
338
+ if num_controlnet == 1:
339
+ if not isinstance(controlnet_conditioning_scale, float):
340
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
341
+ elif num_controlnet > 1:
342
+ if isinstance(controlnet_conditioning_scale, list):
343
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
344
+ raise ValueError("A single batch of multiple conditionings are supported at the moment.")
345
+ elif (
346
+ isinstance(controlnet_conditioning_scale, list)
347
+ and len(controlnet_conditioning_scale) != num_controlnet
348
+ ):
349
+ raise ValueError(
350
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
351
+ " the same length as the number of controlnets"
352
+ )
353
+ else:
354
+ assert False
355
+
356
+ if len(control_guidance_start) != len(control_guidance_end):
357
+ raise ValueError(
358
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
359
+ )
360
+
361
+ if num_controlnet > 1:
362
+ if len(control_guidance_start) != num_controlnet:
363
+ raise ValueError(
364
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
365
+ )
366
+
367
+ for start, end in zip(control_guidance_start, control_guidance_end):
368
+ if start >= end:
369
+ raise ValueError(
370
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
371
+ )
372
+ if start < 0.0:
373
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
374
+ if end > 1.0:
375
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
376
+
377
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
378
+ def check_image(self, image, prompt, prompt_embeds):
379
+ image_is_pil = isinstance(image, PIL.Image.Image)
380
+ image_is_tensor = isinstance(image, torch.Tensor)
381
+ image_is_np = isinstance(image, np.ndarray)
382
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
383
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
384
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
385
+
386
+ if (
387
+ not image_is_pil
388
+ and not image_is_tensor
389
+ and not image_is_np
390
+ and not image_is_pil_list
391
+ and not image_is_tensor_list
392
+ and not image_is_np_list
393
+ ):
394
+ raise TypeError(
395
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
396
+ )
397
+
398
+ if image_is_pil:
399
+ image_batch_size = 1
400
+ else:
401
+ image_batch_size = len(image)
402
+
403
+ if prompt is not None and isinstance(prompt, str):
404
+ prompt_batch_size = 1
405
+ elif prompt is not None and isinstance(prompt, list):
406
+ prompt_batch_size = len(prompt)
407
+ elif prompt_embeds is not None:
408
+ prompt_batch_size = prompt_embeds.shape[0]
409
+
410
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
411
+ raise ValueError(
412
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
413
+ )
414
+
415
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
416
+ def prepare_control_image(
417
+ self,
418
+ image,
419
+ width,
420
+ height,
421
+ batch_size,
422
+ num_images_per_prompt,
423
+ device,
424
+ dtype,
425
+ do_classifier_free_guidance=False,
426
+ guess_mode=False,
427
+ ):
428
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
429
+ image_batch_size = image.shape[0]
430
+
431
+ if image_batch_size == 1:
432
+ repeat_by = batch_size
433
+ else:
434
+ # image batch size is the same as prompt batch size
435
+ repeat_by = num_images_per_prompt
436
+
437
+ image = image.repeat_interleave(repeat_by, dim=0)
438
+
439
+ image = image.to(device=device, dtype=dtype)
440
+
441
+ if do_classifier_free_guidance and not guess_mode:
442
+ image = torch.cat([image] * 2)
443
+
444
+ return image
445
+
446
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
447
+ def get_timesteps(self, num_inference_steps, strength, device):
448
+ # get the original timestep using init_timestep
449
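+ # strength=1.0 keeps all inference steps (start from pure noise); lower values skip the earliest,
+ # noisiest timesteps so the result stays closer to the input image.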
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
450
+
451
+ t_start = max(num_inference_steps - init_timestep, 0)
452
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
453
+
454
+ return timesteps, num_inference_steps - t_start
455
+
456
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
457
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
458
+ raise ValueError(
459
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
460
+ )
461
+
462
+ image = image.to(device=device, dtype=dtype)
463
+
464
+ batch_size = batch_size * num_images_per_prompt
465
+
466
+ if image.shape[1] == 4:
467
+ init_latents = image
468
+
469
+ else:
470
+ _image = image.cpu().detach().numpy()
471
+ init_latents = self.vae_encoder(sample=_image)[0]
472
+ init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
473
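+ # 0.18215 is the latent scaling factor of the Stable Diffusion v1 VAE.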
+ init_latents = 0.18215 * init_latents
474
+
475
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
476
+ # expand init_latents for batch_size
477
+ deprecation_message = (
478
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
479
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
480
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
481
+ " your script to pass as many initial images as text prompts to suppress this warning."
482
+ )
483
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
484
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
485
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
486
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
487
+ raise ValueError(
488
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
489
+ )
490
+ else:
491
+ init_latents = torch.cat([init_latents], dim=0)
492
+
493
+ shape = init_latents.shape
494
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
495
+
496
+ # get latents
497
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
498
+ latents = init_latents
499
+
500
+ return latents
501
+
502
+ @torch.no_grad()
503
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
504
+ def __call__(
505
+ self,
506
+ num_controlnet: int,
507
+ fp16: bool = True,
508
+ prompt: Union[str, List[str]] = None,
509
+ image: Union[
510
+ torch.FloatTensor,
511
+ PIL.Image.Image,
512
+ np.ndarray,
513
+ List[torch.FloatTensor],
514
+ List[PIL.Image.Image],
515
+ List[np.ndarray],
516
+ ] = None,
517
+ control_image: Union[
518
+ torch.FloatTensor,
519
+ PIL.Image.Image,
520
+ np.ndarray,
521
+ List[torch.FloatTensor],
522
+ List[PIL.Image.Image],
523
+ List[np.ndarray],
524
+ ] = None,
525
+ height: Optional[int] = None,
526
+ width: Optional[int] = None,
527
+ strength: float = 0.8,
528
+ num_inference_steps: int = 50,
529
+ guidance_scale: float = 7.5,
530
+ negative_prompt: Optional[Union[str, List[str]]] = None,
531
+ num_images_per_prompt: Optional[int] = 1,
532
+ eta: float = 0.0,
533
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
534
+ latents: Optional[torch.FloatTensor] = None,
535
+ prompt_embeds: Optional[torch.FloatTensor] = None,
536
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
537
+ output_type: Optional[str] = "pil",
538
+ return_dict: bool = True,
539
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
540
+ callback_steps: int = 1,
541
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
542
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
543
+ guess_mode: bool = False,
544
+ control_guidance_start: Union[float, List[float]] = 0.0,
545
+ control_guidance_end: Union[float, List[float]] = 1.0,
546
+ ):
547
+ r"""
548
+ Function invoked when calling the pipeline for generation.
549
+
550
+ Args:
551
+ prompt (`str` or `List[str]`, *optional*):
552
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
+ instead.
554
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
555
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
556
+ The initial image will be used as the starting point for the image generation process. Can also accept
557
+ image latents as `image`; if latents are passed directly, they are not encoded again.
558
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
559
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
560
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
+ the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
+ also be accepted as an image. The dimensions of the output image default to `image`'s dimensions. If
563
+ height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
564
+ specified in init, images must be passed as a list such that each element of the list can be correctly
565
+ batched for input to a single controlnet.
566
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
567
+ The height in pixels of the generated image.
568
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
569
+ The width in pixels of the generated image.
570
+ num_inference_steps (`int`, *optional*, defaults to 50):
571
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
572
+ expense of slower inference.
573
+ guidance_scale (`float`, *optional*, defaults to 7.5):
574
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
575
+ `guidance_scale` is defined as `w` of equation 2 of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
578
+ usually at the expense of lower image quality.
579
+ negative_prompt (`str` or `List[str]`, *optional*):
580
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
581
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
582
+ less than `1`).
583
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
584
+ The number of images to generate per prompt.
585
+ eta (`float`, *optional*, defaults to 0.0):
586
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
587
+ [`schedulers.DDIMScheduler`], will be ignored for others.
588
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
589
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
590
+ to make generation deterministic.
591
+ latents (`torch.FloatTensor`, *optional*):
592
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
593
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
594
+ tensor will be generated by sampling using the supplied random `generator`.
595
+ prompt_embeds (`torch.FloatTensor`, *optional*):
596
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
597
+ provided, text embeddings will be generated from `prompt` input argument.
598
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
599
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
600
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
601
+ argument.
602
+ output_type (`str`, *optional*, defaults to `"pil"`):
603
+ The output format of the generated image. Choose between
604
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
605
+ return_dict (`bool`, *optional*, defaults to `True`):
606
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
607
+ plain tuple.
608
+ callback (`Callable`, *optional*):
609
+ A function that will be called every `callback_steps` steps during inference. The function will be
610
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
611
+ callback_steps (`int`, *optional*, defaults to 1):
612
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
613
+ called at every step.
614
+ cross_attention_kwargs (`dict`, *optional*):
615
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
616
+ `self.processor` in
617
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
618
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
+ corresponding scale as a list. Note that, by default, this pipeline uses a smaller conditioning scale
+ than [`~StableDiffusionControlNetPipeline.__call__`].
623
+ guess_mode (`bool`, *optional*, defaults to `False`):
624
+ In this mode, the ControlNet encoder tries its best to recognize the content of the input image even if
+ you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
626
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
627
+ The percentage of total steps at which the controlnet starts applying.
628
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
629
+ The percentage of total steps at which the controlnet stops applying.
630
+
631
+ Examples:
632
+
633
+ Returns:
634
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
635
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
636
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
637
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
638
+ (nsfw) content, according to the `safety_checker`.
639
+ """
640
+ if fp16:
641
+ torch_dtype = torch.float16
642
+ np_dtype = np.float16
643
+ else:
644
+ torch_dtype = torch.float32
645
+ np_dtype = np.float32
646
+
647
+ # align format for control guidance
648
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
649
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
650
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
651
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
652
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
653
+ mult = num_controlnet
654
+ control_guidance_start, control_guidance_end = (
655
+ mult * [control_guidance_start],
656
+ mult * [control_guidance_end],
657
+ )
658
+
659
+ # 1. Check inputs. Raise error if not correct
660
+ self.check_inputs(
661
+ num_controlnet,
662
+ prompt,
663
+ control_image,
664
+ callback_steps,
665
+ negative_prompt,
666
+ prompt_embeds,
667
+ negative_prompt_embeds,
668
+ controlnet_conditioning_scale,
669
+ control_guidance_start,
670
+ control_guidance_end,
671
+ )
672
+
673
+ # 2. Define call parameters
674
+ if prompt is not None and isinstance(prompt, str):
675
+ batch_size = 1
676
+ elif prompt is not None and isinstance(prompt, list):
677
+ batch_size = len(prompt)
678
+ else:
679
+ batch_size = prompt_embeds.shape[0]
680
+
681
+ device = self._execution_device
682
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
683
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
684
+ # corresponds to doing no classifier free guidance.
685
+ do_classifier_free_guidance = guidance_scale > 1.0
686
+
687
+ if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
688
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
689
+
690
+ # 3. Encode input prompt
691
+ prompt_embeds = self._encode_prompt(
692
+ prompt,
693
+ num_images_per_prompt,
694
+ do_classifier_free_guidance,
695
+ negative_prompt,
696
+ prompt_embeds=prompt_embeds,
697
+ negative_prompt_embeds=negative_prompt_embeds,
698
+ )
699
+ # 4. Prepare image
700
+ image = self.image_processor.preprocess(image).to(dtype=torch.float32)
701
+
702
+ # 5. Prepare controlnet_conditioning_image
703
+ if num_controlnet == 1:
704
+ control_image = self.prepare_control_image(
705
+ image=control_image,
706
+ width=width,
707
+ height=height,
708
+ batch_size=batch_size * num_images_per_prompt,
709
+ num_images_per_prompt=num_images_per_prompt,
710
+ device=device,
711
+ dtype=torch_dtype,
712
+ do_classifier_free_guidance=do_classifier_free_guidance,
713
+ guess_mode=guess_mode,
714
+ )
715
+ elif num_controlnet > 1:
716
+ control_images = []
717
+
718
+ for control_image_ in control_image:
719
+ control_image_ = self.prepare_control_image(
720
+ image=control_image_,
721
+ width=width,
722
+ height=height,
723
+ batch_size=batch_size * num_images_per_prompt,
724
+ num_images_per_prompt=num_images_per_prompt,
725
+ device=device,
726
+ dtype=torch_dtype,
727
+ do_classifier_free_guidance=do_classifier_free_guidance,
728
+ guess_mode=guess_mode,
729
+ )
730
+
731
+ control_images.append(control_image_)
732
+
733
+ control_image = control_images
734
+ else:
735
+ assert False
736
+
737
+ # 5. Prepare timesteps
738
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
739
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
740
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
741
+
742
+ # 6. Prepare latent variables
743
+ latents = self.prepare_latents(
744
+ image,
745
+ latent_timestep,
746
+ batch_size,
747
+ num_images_per_prompt,
748
+ torch_dtype,
749
+ device,
750
+ generator,
751
+ )
752
+
753
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
754
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
755
+
756
+ # 7.1 Create tensor stating which controlnets to keep
757
+ controlnet_keep = []
758
+ for i in range(len(timesteps)):
759
+ keeps = [
760
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
761
+ for s, e in zip(control_guidance_start, control_guidance_end)
762
+ ]
763
+ controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
764
+
765
+ # 8. Denoising loop
766
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
767
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
768
+ for i, t in enumerate(timesteps):
769
+ # expand the latents if we are doing classifier free guidance
770
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
771
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
772
+
773
+ if isinstance(controlnet_keep[i], list):
774
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
775
+ else:
776
+ controlnet_cond_scale = controlnet_conditioning_scale
777
+ if isinstance(controlnet_cond_scale, list):
778
+ controlnet_cond_scale = controlnet_cond_scale[0]
779
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
780
+
781
+ # predict the noise residual
782
+ _latent_model_input = latent_model_input.cpu().detach().numpy()
783
+ _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
784
+ _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
785
+
786
+ if num_controlnet == 1:
787
+ control_images = np.array([control_image], dtype=np_dtype)
788
+ else:
789
+ control_images = []
790
+ for _control_img in control_image:
791
+ _control_img = _control_img.cpu().detach().numpy()
792
+ control_images.append(_control_img)
793
+ control_images = np.array(control_images, dtype=np_dtype)
794
+
795
+ control_scales = np.array(cond_scale, dtype=np_dtype)
796
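+ # Reshape to one scalar scale per ControlNet, matching the (num_controlnet, 1) layout the exported
+ # ONNX UNet+ControlNet graph presumably expects for its conditioning_scales input.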
+ control_scales = np.resize(control_scales, (num_controlnet, 1))
797
+
798
+ noise_pred = self.unet(
799
+ sample=_latent_model_input,
800
+ timestep=_t,
801
+ encoder_hidden_states=_prompt_embeds,
802
+ controlnet_conds=control_images,
803
+ conditioning_scales=control_scales,
804
+ )[0]
805
+ noise_pred = torch.from_numpy(noise_pred).to(device)
806
+
807
+ # perform guidance
808
+ if do_classifier_free_guidance:
809
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
810
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
811
+
812
+ # compute the previous noisy sample x_t -> x_t-1
813
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
814
+
815
+ # call the callback, if provided
816
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
817
+ progress_bar.update()
818
+ if callback is not None and i % callback_steps == 0:
819
+ step_idx = i // getattr(self.scheduler, "order", 1)
820
+ callback(step_idx, t, latents)
821
+
822
+ if not output_type == "latent":
823
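+ # Undo the 0.18215 latent scaling before handing the latents to the ONNX VAE decoder.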
+ _latents = latents.cpu().detach().numpy() / 0.18215
824
+ _latents = np.array(_latents, dtype=np_dtype)
825
+ image = self.vae_decoder(latent_sample=_latents)[0]
826
+ image = torch.from_numpy(image).to(device, dtype=torch.float32)
827
+ has_nsfw_concept = None
828
+ else:
829
+ image = latents
830
+ has_nsfw_concept = None
831
+
832
+ if has_nsfw_concept is None:
833
+ do_denormalize = [True] * image.shape[0]
834
+ else:
835
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
836
+
837
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
838
+
839
+ if not return_dict:
840
+ return (image, has_nsfw_concept)
841
+
842
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
843
+
844
+
845
+ if __name__ == "__main__":
846
+ parser = argparse.ArgumentParser()
847
+
848
+ parser.add_argument(
849
+ "--sd_model",
850
+ type=str,
851
+ required=True,
852
+ help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
853
+ )
854
+
855
+ parser.add_argument(
856
+ "--onnx_model_dir",
857
+ type=str,
858
+ required=True,
859
+ help="Path to the ONNX directory",
860
+ )
861
+
862
+ parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")
863
+
864
+ args = parser.parse_args()
865
+
866
+ qr_image = Image.open(args.qr_img_path)
867
+ qr_image = qr_image.resize((512, 512))
868
+
869
+ # init stable diffusion pipeline
870
+ pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
871
+ pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
872
+
873
+ provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
874
+ onnx_pipeline = OnnxStableDiffusionControlNetImg2ImgPipeline(
875
+ vae_encoder=OnnxRuntimeModel.from_pretrained(
876
+ os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
877
+ ),
878
+ vae_decoder=OnnxRuntimeModel.from_pretrained(
879
+ os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
880
+ ),
881
+ text_encoder=OnnxRuntimeModel.from_pretrained(
882
+ os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
883
+ ),
884
+ tokenizer=pipeline.tokenizer,
885
+ unet=OnnxRuntimeModel.from_pretrained(os.path.join(args.onnx_model_dir, "unet"), provider=provider),
886
+ scheduler=pipeline.scheduler,
887
+ )
888
+ onnx_pipeline = onnx_pipeline.to("cuda")
889
+
890
+ prompt = "a cute cat fly to the moon"
891
+ negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"
892
+
893
+ for i in range(10):
894
+ start_time = time.time()
895
+ image = onnx_pipeline(
896
+ num_controlnet=2,
897
+ prompt=prompt,
898
+ negative_prompt=negative_prompt,
899
+ image=qr_image,
900
+ control_image=[qr_image, qr_image],
901
+ width=512,
902
+ height=512,
903
+ strength=0.75,
904
+ num_inference_steps=20,
905
+ num_images_per_prompt=1,
906
+ controlnet_conditioning_scale=[0.8, 0.8],
907
+ control_guidance_start=[0.3, 0.3],
908
+ control_guidance_end=[0.9, 0.9],
909
+ ).images[0]
910
+ print(time.time() - start_time)
911
+ image.save("output_qr_code.png")
v0.26.3/run_tensorrt_controlnet.py ADDED
@@ -0,0 +1,1022 @@
+ import argparse
2
+ import atexit
3
+ import inspect
4
+ import os
5
+ import time
6
+ import warnings
7
+ from typing import Any, Callable, Dict, List, Optional, Union
8
+
9
+ import numpy as np
10
+ import PIL.Image
11
+ import pycuda.driver as cuda
12
+ import tensorrt as trt
13
+ import torch
14
+ from PIL import Image
15
+ from pycuda.tools import make_default_context
16
+ from transformers import CLIPTokenizer
17
+
18
+ from diffusers import OnnxRuntimeModel, StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler
19
+ from diffusers.image_processor import VaeImageProcessor
20
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
21
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
22
+ from diffusers.schedulers import KarrasDiffusionSchedulers
23
+ from diffusers.utils import (
24
+ deprecate,
25
+ logging,
26
+ replace_example_docstring,
27
+ )
28
+ from diffusers.utils.torch_utils import randn_tensor
29
+
30
+
31
+ # Initialize CUDA
32
+ cuda.init()
33
+ context = make_default_context()
34
+ device = context.get_device()
35
+ atexit.register(context.pop)
36
+
37
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
38
+
39
+
40
+ def load_engine(trt_runtime, engine_path):
41
+ with open(engine_path, "rb") as f:
42
+ engine_data = f.read()
43
+ engine = trt_runtime.deserialize_cuda_engine(engine_data)
44
+ return engine
45
+
46
+
47
+ class TensorRTModel:
48
+ def __init__(
49
+ self,
50
+ trt_engine_path,
51
+ **kwargs,
52
+ ):
53
+ cuda.init()
54
+ stream = cuda.Stream()
55
+ TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
56
+ trt.init_libnvinfer_plugins(TRT_LOGGER, "")
57
+ trt_runtime = trt.Runtime(TRT_LOGGER)
58
+ engine = load_engine(trt_runtime, trt_engine_path)
59
+ context = engine.create_execution_context()
60
+
61
+ # allocates memory for network inputs/outputs on both CPU and GPU
62
+ host_inputs = []
63
+ cuda_inputs = []
64
+ host_outputs = []
65
+ cuda_outputs = []
66
+ bindings = []
67
+ input_names = []
68
+ output_names = []
69
+
70
+ for binding in engine:
71
+ datatype = engine.get_binding_dtype(binding)
72
+ if datatype == trt.DataType.HALF:
73
+ dtype = np.float16
74
+ else:
75
+ dtype = np.float32
76
+
77
+ shape = tuple(engine.get_binding_shape(binding))
78
+ host_mem = cuda.pagelocked_empty(shape, dtype)
79
+ cuda_mem = cuda.mem_alloc(host_mem.nbytes)
80
+ bindings.append(int(cuda_mem))
81
+
82
+ if engine.binding_is_input(binding):
83
+ host_inputs.append(host_mem)
84
+ cuda_inputs.append(cuda_mem)
85
+ input_names.append(binding)
86
+ else:
87
+ host_outputs.append(host_mem)
88
+ cuda_outputs.append(cuda_mem)
89
+ output_names.append(binding)
90
+
91
+ self.stream = stream
92
+ self.context = context
93
+ self.engine = engine
94
+
95
+ self.host_inputs = host_inputs
96
+ self.cuda_inputs = cuda_inputs
97
+ self.host_outputs = host_outputs
98
+ self.cuda_outputs = cuda_outputs
99
+ self.bindings = bindings
100
+ self.batch_size = engine.max_batch_size
101
+
102
+ self.input_names = input_names
103
+ self.output_names = output_names
104
+
105
+ def __call__(self, **kwargs):
106
+ context = self.context
107
+ stream = self.stream
108
+ bindings = self.bindings
109
+
110
+ host_inputs = self.host_inputs
111
+ cuda_inputs = self.cuda_inputs
112
+ host_outputs = self.host_outputs
113
+ cuda_outputs = self.cuda_outputs
114
+
115
+ for idx, input_name in enumerate(self.input_names):
116
+ _input = kwargs[input_name]
117
+ np.copyto(host_inputs[idx], _input)
118
+ # transfer input data to the GPU
119
+ cuda.memcpy_htod_async(cuda_inputs[idx], host_inputs[idx], stream)
120
+
121
+ context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
122
+
123
+ result = {}
124
+ for idx, output_name in enumerate(self.output_names):
125
+ # transfer predictions back from the GPU
126
+ cuda.memcpy_dtoh_async(host_outputs[idx], cuda_outputs[idx], stream)
127
+ result[output_name] = host_outputs[idx]
128
+
129
+ stream.synchronize()
130
+
131
+ return result
132
+
133
+
134
+ EXAMPLE_DOC_STRING = """
135
+ Examples:
136
+ ```py
137
+ >>> # !pip install opencv-python transformers accelerate
138
+ >>> from diffusers import StableDiffusionControlNetImg2ImgPipeline, ControlNetModel, UniPCMultistepScheduler
139
+ >>> from diffusers.utils import load_image
140
+ >>> import numpy as np
141
+ >>> import torch
142
+
143
+ >>> import cv2
144
+ >>> from PIL import Image
145
+
146
+ >>> # download an image
147
+ >>> image = load_image(
148
+ ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
149
+ ... )
150
+ >>> np_image = np.array(image)
151
+
152
+ >>> # get canny image
153
+ >>> np_image = cv2.Canny(np_image, 100, 200)
154
+ >>> np_image = np_image[:, :, None]
155
+ >>> np_image = np.concatenate([np_image, np_image, np_image], axis=2)
156
+ >>> canny_image = Image.fromarray(np_image)
157
+
158
+ >>> # load control net and stable diffusion v1-5
159
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
160
+ >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
161
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
162
+ ... )
163
+
164
+ >>> # speed up diffusion process with faster scheduler and memory optimization
165
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
166
+ >>> pipe.enable_model_cpu_offload()
167
+
168
+ >>> # generate image
169
+ >>> generator = torch.manual_seed(0)
170
+ >>> image = pipe(
171
+ ... "futuristic-looking woman",
172
+ ... num_inference_steps=20,
173
+ ... generator=generator,
174
+ ... image=image,
175
+ ... control_image=canny_image,
176
+ ... ).images[0]
177
+ ```
178
+ """
179
+
180
+
181
+ def prepare_image(image):
182
+ if isinstance(image, torch.Tensor):
183
+ # Batch single image
184
+ if image.ndim == 3:
185
+ image = image.unsqueeze(0)
186
+
187
+ image = image.to(dtype=torch.float32)
188
+ else:
189
+ # preprocess image
190
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
191
+ image = [image]
192
+
193
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
194
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
195
+ image = np.concatenate(image, axis=0)
196
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
197
+ image = np.concatenate([i[None, :] for i in image], axis=0)
198
+
199
+ image = image.transpose(0, 3, 1, 2)
200
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
201
+
202
+ return image
203
+
204
+
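# A small illustration of what the helper above returns, assuming a plain PIL input:
# a single RGB image (or a list of images / numpy arrays) comes back as an NCHW float32
# tensor scaled to [-1, 1], e.g.
#     t = prepare_image(PIL.Image.new("RGB", (512, 512)))
#     t.shape          -> torch.Size([1, 3, 512, 512])
#     t.min(), t.max() -> both within [-1.0, 1.0]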
205
+ class TensorRTStableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
206
+ vae_encoder: OnnxRuntimeModel
207
+ vae_decoder: OnnxRuntimeModel
208
+ text_encoder: OnnxRuntimeModel
209
+ tokenizer: CLIPTokenizer
210
+ unet: TensorRTModel
211
+ scheduler: KarrasDiffusionSchedulers
212
+
213
+ def __init__(
214
+ self,
215
+ vae_encoder: OnnxRuntimeModel,
216
+ vae_decoder: OnnxRuntimeModel,
217
+ text_encoder: OnnxRuntimeModel,
218
+ tokenizer: CLIPTokenizer,
219
+ unet: TensorRTModel,
220
+ scheduler: KarrasDiffusionSchedulers,
221
+ ):
222
+ super().__init__()
223
+
224
+ self.register_modules(
225
+ vae_encoder=vae_encoder,
226
+ vae_decoder=vae_decoder,
227
+ text_encoder=text_encoder,
228
+ tokenizer=tokenizer,
229
+ unet=unet,
230
+ scheduler=scheduler,
231
+ )
232
+ self.vae_scale_factor = 2 ** (4 - 1)
233
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
234
+ self.control_image_processor = VaeImageProcessor(
235
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
236
+ )
237
+
238
+ def _encode_prompt(
239
+ self,
240
+ prompt: Union[str, List[str]],
241
+ num_images_per_prompt: Optional[int],
242
+ do_classifier_free_guidance: bool,
243
+ negative_prompt: Optional[str],
244
+ prompt_embeds: Optional[np.ndarray] = None,
245
+ negative_prompt_embeds: Optional[np.ndarray] = None,
246
+ ):
247
+ r"""
248
+ Encodes the prompt into text encoder hidden states.
249
+
250
+ Args:
251
+ prompt (`str` or `List[str]`):
252
+ prompt to be encoded
253
+ num_images_per_prompt (`int`):
254
+ number of images that should be generated per prompt
255
+ do_classifier_free_guidance (`bool`):
256
+ whether to use classifier free guidance or not
257
+ negative_prompt (`str` or `List[str]`):
258
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
259
+ if `guidance_scale` is less than `1`).
260
+ prompt_embeds (`np.ndarray`, *optional*):
261
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
262
+ provided, text embeddings will be generated from `prompt` input argument.
263
+ negative_prompt_embeds (`np.ndarray`, *optional*):
264
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
265
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
266
+ argument.
267
+ """
268
+ if prompt is not None and isinstance(prompt, str):
269
+ batch_size = 1
270
+ elif prompt is not None and isinstance(prompt, list):
271
+ batch_size = len(prompt)
272
+ else:
273
+ batch_size = prompt_embeds.shape[0]
274
+
275
+ if prompt_embeds is None:
276
+ # get prompt text embeddings
277
+ text_inputs = self.tokenizer(
278
+ prompt,
279
+ padding="max_length",
280
+ max_length=self.tokenizer.model_max_length,
281
+ truncation=True,
282
+ return_tensors="np",
283
+ )
284
+ text_input_ids = text_inputs.input_ids
285
+ untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids
286
+
287
+ if not np.array_equal(text_input_ids, untruncated_ids):
288
+ removed_text = self.tokenizer.batch_decode(
289
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
290
+ )
291
+ logger.warning(
292
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
293
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
294
+ )
295
+
296
+ prompt_embeds = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0]
297
+
298
+ prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
299
+
300
+ # get unconditional embeddings for classifier free guidance
301
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
302
+ uncond_tokens: List[str]
303
+ if negative_prompt is None:
304
+ uncond_tokens = [""] * batch_size
305
+ elif type(prompt) is not type(negative_prompt):
306
+ raise TypeError(
307
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
308
+ f" {type(prompt)}."
309
+ )
310
+ elif isinstance(negative_prompt, str):
311
+ uncond_tokens = [negative_prompt] * batch_size
312
+ elif batch_size != len(negative_prompt):
313
+ raise ValueError(
314
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
315
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
316
+ " the batch size of `prompt`."
317
+ )
318
+ else:
319
+ uncond_tokens = negative_prompt
320
+
321
+ max_length = prompt_embeds.shape[1]
322
+ uncond_input = self.tokenizer(
323
+ uncond_tokens,
324
+ padding="max_length",
325
+ max_length=max_length,
326
+ truncation=True,
327
+ return_tensors="np",
328
+ )
329
+ negative_prompt_embeds = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0]
330
+
331
+ if do_classifier_free_guidance:
332
+ negative_prompt_embeds = np.repeat(negative_prompt_embeds, num_images_per_prompt, axis=0)
333
+
334
+ # For classifier free guidance, we need to do two forward passes.
335
+ # Here we concatenate the unconditional and text embeddings into a single batch
336
+ # to avoid doing two forward passes
337
+ prompt_embeds = np.concatenate([negative_prompt_embeds, prompt_embeds])
338
+
339
+ return prompt_embeds
340
+
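# Shape sketch for `_encode_prompt` above: with classifier-free guidance enabled and a
# single prompt with num_images_per_prompt=1, the returned array stacks the unconditional
# embeddings first, giving shape (2, tokenizer.model_max_length, hidden_dim); without
# guidance it stays (1, tokenizer.model_max_length, hidden_dim).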
341
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
342
+ def decode_latents(self, latents):
343
+ warnings.warn(
344
+ "The decode_latents method is deprecated and will be removed in a future version. Please"
345
+ " use VaeImageProcessor instead",
346
+ FutureWarning,
347
+ )
348
+ latents = 1 / self.vae.config.scaling_factor * latents
349
+ image = self.vae.decode(latents, return_dict=False)[0]
350
+ image = (image / 2 + 0.5).clamp(0, 1)
351
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
352
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
353
+ return image
354
+
355
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
356
+ def prepare_extra_step_kwargs(self, generator, eta):
357
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
358
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
359
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
360
+ # and should be between [0, 1]
361
+
362
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
363
+ extra_step_kwargs = {}
364
+ if accepts_eta:
365
+ extra_step_kwargs["eta"] = eta
366
+
367
+ # check if the scheduler accepts generator
368
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
369
+ if accepts_generator:
370
+ extra_step_kwargs["generator"] = generator
371
+ return extra_step_kwargs
372
+
373
+ def check_inputs(
374
+ self,
375
+ num_controlnet,
376
+ prompt,
377
+ image,
378
+ callback_steps,
379
+ negative_prompt=None,
380
+ prompt_embeds=None,
381
+ negative_prompt_embeds=None,
382
+ controlnet_conditioning_scale=1.0,
383
+ control_guidance_start=0.0,
384
+ control_guidance_end=1.0,
385
+ ):
386
+ if (callback_steps is None) or (
387
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
388
+ ):
389
+ raise ValueError(
390
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
391
+ f" {type(callback_steps)}."
392
+ )
393
+
394
+ if prompt is not None and prompt_embeds is not None:
395
+ raise ValueError(
396
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
397
+ " only forward one of the two."
398
+ )
399
+ elif prompt is None and prompt_embeds is None:
400
+ raise ValueError(
401
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
402
+ )
403
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
404
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
405
+
406
+ if negative_prompt is not None and negative_prompt_embeds is not None:
407
+ raise ValueError(
408
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
409
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
410
+ )
411
+
412
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
413
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
414
+ raise ValueError(
415
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
416
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
417
+ f" {negative_prompt_embeds.shape}."
418
+ )
419
+
420
+ # Check `image`
421
+ if num_controlnet == 1:
422
+ self.check_image(image, prompt, prompt_embeds)
423
+ elif num_controlnet > 1:
424
+ if not isinstance(image, list):
425
+ raise TypeError("For multiple controlnets: `image` must be type `list`")
426
+
427
+ # When `image` is a nested list:
428
+ # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
429
+ elif any(isinstance(i, list) for i in image):
430
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
431
+ elif len(image) != num_controlnet:
432
+ raise ValueError(
433
+ f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {num_controlnet} ControlNets."
434
+ )
435
+
436
+ for image_ in image:
437
+ self.check_image(image_, prompt, prompt_embeds)
438
+ else:
439
+ assert False
440
+
441
+ # Check `controlnet_conditioning_scale`
442
+ if num_controlnet == 1:
443
+ if not isinstance(controlnet_conditioning_scale, float):
444
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
445
+ elif num_controlnet > 1:
446
+ if isinstance(controlnet_conditioning_scale, list):
447
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
448
+ raise ValueError("Only a single batch of multiple conditionings is supported at the moment.")
449
+ elif (
450
+ isinstance(controlnet_conditioning_scale, list)
451
+ and len(controlnet_conditioning_scale) != num_controlnet
452
+ ):
453
+ raise ValueError(
454
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
455
+ " the same length as the number of controlnets"
456
+ )
457
+ else:
458
+ assert False
459
+
460
+ if len(control_guidance_start) != len(control_guidance_end):
461
+ raise ValueError(
462
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
463
+ )
464
+
465
+ if num_controlnet > 1:
466
+ if len(control_guidance_start) != num_controlnet:
467
+ raise ValueError(
468
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {num_controlnet} controlnets available. Make sure to provide {num_controlnet}."
469
+ )
470
+
471
+ for start, end in zip(control_guidance_start, control_guidance_end):
472
+ if start >= end:
473
+ raise ValueError(
474
+ f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
475
+ )
476
+ if start < 0.0:
477
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
478
+ if end > 1.0:
479
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
480
+
481
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
482
+ def check_image(self, image, prompt, prompt_embeds):
483
+ image_is_pil = isinstance(image, PIL.Image.Image)
484
+ image_is_tensor = isinstance(image, torch.Tensor)
485
+ image_is_np = isinstance(image, np.ndarray)
486
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
487
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
488
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
489
+
490
+ if (
491
+ not image_is_pil
492
+ and not image_is_tensor
493
+ and not image_is_np
494
+ and not image_is_pil_list
495
+ and not image_is_tensor_list
496
+ and not image_is_np_list
497
+ ):
498
+ raise TypeError(
499
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
500
+ )
501
+
502
+ if image_is_pil:
503
+ image_batch_size = 1
504
+ else:
505
+ image_batch_size = len(image)
506
+
507
+ if prompt is not None and isinstance(prompt, str):
508
+ prompt_batch_size = 1
509
+ elif prompt is not None and isinstance(prompt, list):
510
+ prompt_batch_size = len(prompt)
511
+ elif prompt_embeds is not None:
512
+ prompt_batch_size = prompt_embeds.shape[0]
513
+
514
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
515
+ raise ValueError(
516
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
517
+ )
518
+
519
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
520
+ def prepare_control_image(
521
+ self,
522
+ image,
523
+ width,
524
+ height,
525
+ batch_size,
526
+ num_images_per_prompt,
527
+ device,
528
+ dtype,
529
+ do_classifier_free_guidance=False,
530
+ guess_mode=False,
531
+ ):
532
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
533
+ image_batch_size = image.shape[0]
534
+
535
+ if image_batch_size == 1:
536
+ repeat_by = batch_size
537
+ else:
538
+ # image batch size is the same as prompt batch size
539
+ repeat_by = num_images_per_prompt
540
+
541
+ image = image.repeat_interleave(repeat_by, dim=0)
542
+
543
+ image = image.to(device=device, dtype=dtype)
544
+
545
+ if do_classifier_free_guidance and not guess_mode:
546
+ image = torch.cat([image] * 2)
547
+
548
+ return image
549
+
550
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
551
+ def get_timesteps(self, num_inference_steps, strength, device):
552
+ # get the original timestep using init_timestep
553
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
554
+
555
+ t_start = max(num_inference_steps - init_timestep, 0)
556
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
557
+
558
+ return timesteps, num_inference_steps - t_start
559
+
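# Worked example for `get_timesteps` above: with num_inference_steps=20 and strength=0.75,
#     init_timestep = min(int(20 * 0.75), 20) = 15
#     t_start       = max(20 - 15, 0)         = 5
# so the first 5 scheduler timesteps are skipped and 15 denoising steps actually run;
# a lower `strength` preserves more of the input image by starting later in the schedule.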
560
+ def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
561
+ if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
562
+ raise ValueError(
563
+ f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
564
+ )
565
+
566
+ image = image.to(device=device, dtype=dtype)
567
+
568
+ batch_size = batch_size * num_images_per_prompt
569
+
570
+ if image.shape[1] == 4:
571
+ init_latents = image
572
+
573
+ else:
574
+ _image = image.cpu().detach().numpy()
575
+ init_latents = self.vae_encoder(sample=_image)[0]
576
+ init_latents = torch.from_numpy(init_latents).to(device=device, dtype=dtype)
577
+ init_latents = 0.18215 * init_latents
578
+
579
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
580
+ # expand init_latents for batch_size
581
+ deprecation_message = (
582
+ f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
583
+ " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
584
+ " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
585
+ " your script to pass as many initial images as text prompts to suppress this warning."
586
+ )
587
+ deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
588
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
589
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
590
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
591
+ raise ValueError(
592
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
593
+ )
594
+ else:
595
+ init_latents = torch.cat([init_latents], dim=0)
596
+
597
+ shape = init_latents.shape
598
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
599
+
600
+ # get latents
601
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
602
+ latents = init_latents
603
+
604
+ return latents
605
+
606
+ @torch.no_grad()
607
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
608
+ def __call__(
609
+ self,
610
+ num_controlnet: int,
611
+ fp16: bool = True,
612
+ prompt: Union[str, List[str]] = None,
613
+ image: Union[
614
+ torch.FloatTensor,
615
+ PIL.Image.Image,
616
+ np.ndarray,
617
+ List[torch.FloatTensor],
618
+ List[PIL.Image.Image],
619
+ List[np.ndarray],
620
+ ] = None,
621
+ control_image: Union[
622
+ torch.FloatTensor,
623
+ PIL.Image.Image,
624
+ np.ndarray,
625
+ List[torch.FloatTensor],
626
+ List[PIL.Image.Image],
627
+ List[np.ndarray],
628
+ ] = None,
629
+ height: Optional[int] = None,
630
+ width: Optional[int] = None,
631
+ strength: float = 0.8,
632
+ num_inference_steps: int = 50,
633
+ guidance_scale: float = 7.5,
634
+ negative_prompt: Optional[Union[str, List[str]]] = None,
635
+ num_images_per_prompt: Optional[int] = 1,
636
+ eta: float = 0.0,
637
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
638
+ latents: Optional[torch.FloatTensor] = None,
639
+ prompt_embeds: Optional[torch.FloatTensor] = None,
640
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
641
+ output_type: Optional[str] = "pil",
642
+ return_dict: bool = True,
643
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
644
+ callback_steps: int = 1,
645
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
646
+ controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
647
+ guess_mode: bool = False,
648
+ control_guidance_start: Union[float, List[float]] = 0.0,
649
+ control_guidance_end: Union[float, List[float]] = 1.0,
650
+ ):
651
+ r"""
652
+ Function invoked when calling the pipeline for generation.
653
+
654
+ Args:
655
+ prompt (`str` or `List[str]`, *optional*):
656
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
657
+ instead.
658
+ image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
659
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
660
+ The initial image will be used as the starting point for the image generation process. Can also accept
661
+ image latents as `image`; if latents are passed directly, they will not be encoded again.
662
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,:
663
+ `List[List[torch.FloatTensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
664
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
665
+ the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
666
+ also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If
667
+ height and/or width are passed, `image` is resized according to them. If multiple ControlNets are
668
+ specified in init, images must be passed as a list such that each element of the list can be correctly
669
+ batched for input to a single controlnet.
670
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
671
+ The height in pixels of the generated image.
672
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
673
+ The width in pixels of the generated image.
674
+ num_inference_steps (`int`, *optional*, defaults to 50):
675
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
676
+ expense of slower inference.
677
+ guidance_scale (`float`, *optional*, defaults to 7.5):
678
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
679
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
680
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
681
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
682
+ usually at the expense of lower image quality.
683
+ negative_prompt (`str` or `List[str]`, *optional*):
684
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
685
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
686
+ less than `1`).
687
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
688
+ The number of images to generate per prompt.
689
+ eta (`float`, *optional*, defaults to 0.0):
690
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
691
+ [`schedulers.DDIMScheduler`], will be ignored for others.
692
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
693
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
694
+ to make generation deterministic.
695
+ latents (`torch.FloatTensor`, *optional*):
696
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
697
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
698
+ tensor will be generated by sampling using the supplied random `generator`.
699
+ prompt_embeds (`torch.FloatTensor`, *optional*):
700
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
701
+ provided, text embeddings will be generated from `prompt` input argument.
702
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
703
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
704
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
705
+ argument.
706
+ output_type (`str`, *optional*, defaults to `"pil"`):
707
+ The output format of the generate image. Choose between
708
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
709
+ return_dict (`bool`, *optional*, defaults to `True`):
710
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
711
+ plain tuple.
712
+ callback (`Callable`, *optional*):
713
+ A function that will be called every `callback_steps` steps during inference. The function will be
714
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
715
+ callback_steps (`int`, *optional*, defaults to 1):
716
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
717
+ called at every step.
718
+ cross_attention_kwargs (`dict`, *optional*):
719
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
720
+ `self.processor` in
721
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
722
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
723
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
724
+ to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
725
+ corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
726
+ than for [`~StableDiffusionControlNetPipeline.__call__`].
727
+ guess_mode (`bool`, *optional*, defaults to `False`):
728
+ In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
729
+ you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.
730
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
731
+ The percentage of total steps at which the controlnet starts applying.
732
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
733
+ The percentage of total steps at which the controlnet stops applying.
734
+
735
+ Examples:
736
+
737
+ Returns:
738
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
739
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
740
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
741
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
742
+ (nsfw) content, according to the `safety_checker`.
743
+ """
744
+ if fp16:
745
+ torch_dtype = torch.float16
746
+ np_dtype = np.float16
747
+ else:
748
+ torch_dtype = torch.float32
749
+ np_dtype = np.float32
750
+
751
+ # align format for control guidance
752
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
753
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
754
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
755
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
756
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
757
+ mult = num_controlnet
758
+ control_guidance_start, control_guidance_end = (
759
+ mult * [control_guidance_start],
760
+ mult * [control_guidance_end],
761
+ )
762
+
763
+ # 1. Check inputs. Raise error if not correct
764
+ self.check_inputs(
765
+ num_controlnet,
766
+ prompt,
767
+ control_image,
768
+ callback_steps,
769
+ negative_prompt,
770
+ prompt_embeds,
771
+ negative_prompt_embeds,
772
+ controlnet_conditioning_scale,
773
+ control_guidance_start,
774
+ control_guidance_end,
775
+ )
776
+
777
+ # 2. Define call parameters
778
+ if prompt is not None and isinstance(prompt, str):
779
+ batch_size = 1
780
+ elif prompt is not None and isinstance(prompt, list):
781
+ batch_size = len(prompt)
782
+ else:
783
+ batch_size = prompt_embeds.shape[0]
784
+
785
+ device = self._execution_device
786
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
787
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
788
+ # corresponds to doing no classifier free guidance.
789
+ do_classifier_free_guidance = guidance_scale > 1.0
790
+
791
+ if num_controlnet > 1 and isinstance(controlnet_conditioning_scale, float):
792
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * num_controlnet
793
+
794
+ # 3. Encode input prompt
795
+ prompt_embeds = self._encode_prompt(
796
+ prompt,
797
+ num_images_per_prompt,
798
+ do_classifier_free_guidance,
799
+ negative_prompt,
800
+ prompt_embeds=prompt_embeds,
801
+ negative_prompt_embeds=negative_prompt_embeds,
802
+ )
803
+ # 4. Prepare image
804
+ image = self.image_processor.preprocess(image).to(dtype=torch.float32)
805
+
806
+ # 5. Prepare controlnet_conditioning_image
807
+ if num_controlnet == 1:
808
+ control_image = self.prepare_control_image(
809
+ image=control_image,
810
+ width=width,
811
+ height=height,
812
+ batch_size=batch_size * num_images_per_prompt,
813
+ num_images_per_prompt=num_images_per_prompt,
814
+ device=device,
815
+ dtype=torch_dtype,
816
+ do_classifier_free_guidance=do_classifier_free_guidance,
817
+ guess_mode=guess_mode,
818
+ )
819
+ elif num_controlnet > 1:
820
+ control_images = []
821
+
822
+ for control_image_ in control_image:
823
+ control_image_ = self.prepare_control_image(
824
+ image=control_image_,
825
+ width=width,
826
+ height=height,
827
+ batch_size=batch_size * num_images_per_prompt,
828
+ num_images_per_prompt=num_images_per_prompt,
829
+ device=device,
830
+ dtype=torch_dtype,
831
+ do_classifier_free_guidance=do_classifier_free_guidance,
832
+ guess_mode=guess_mode,
833
+ )
834
+
835
+ control_images.append(control_image_)
836
+
837
+ control_image = control_images
838
+ else:
839
+ assert False
840
+
841
+ # 6. Prepare timesteps
842
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
843
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
844
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
845
+
846
+ # 7. Prepare latent variables
847
+ latents = self.prepare_latents(
848
+ image,
849
+ latent_timestep,
850
+ batch_size,
851
+ num_images_per_prompt,
852
+ torch_dtype,
853
+ device,
854
+ generator,
855
+ )
856
+
857
+ # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
858
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
859
+
860
+ # 8.1 Create tensor stating which controlnets to keep
861
+ controlnet_keep = []
862
+ for i in range(len(timesteps)):
863
+ keeps = [
864
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
865
+ for s, e in zip(control_guidance_start, control_guidance_end)
866
+ ]
867
+ controlnet_keep.append(keeps[0] if num_controlnet == 1 else keeps)
868
+
869
+ # 9. Denoising loop
870
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
871
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
872
+ for i, t in enumerate(timesteps):
873
+ # expand the latents if we are doing classifier free guidance
874
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
875
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
876
+
877
+ if isinstance(controlnet_keep[i], list):
878
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
879
+ else:
880
+ controlnet_cond_scale = controlnet_conditioning_scale
881
+ if isinstance(controlnet_cond_scale, list):
882
+ controlnet_cond_scale = controlnet_cond_scale[0]
883
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
884
+
885
+ # predict the noise residual
886
+ _latent_model_input = latent_model_input.cpu().detach().numpy()
887
+ _prompt_embeds = np.array(prompt_embeds, dtype=np_dtype)
888
+ _t = np.array([t.cpu().detach().numpy()], dtype=np_dtype)
889
+
890
+ if num_controlnet == 1:
891
+ control_images = np.array([control_image], dtype=np_dtype)
892
+ else:
893
+ control_images = []
894
+ for _control_img in control_image:
895
+ _control_img = _control_img.cpu().detach().numpy()
896
+ control_images.append(_control_img)
897
+ control_images = np.array(control_images, dtype=np_dtype)
898
+
899
+ control_scales = np.array(cond_scale, dtype=np_dtype)
900
+ control_scales = np.resize(control_scales, (num_controlnet, 1))
901
+
902
+ noise_pred = self.unet(
903
+ sample=_latent_model_input,
904
+ timestep=_t,
905
+ encoder_hidden_states=_prompt_embeds,
906
+ controlnet_conds=control_images,
907
+ conditioning_scales=control_scales,
908
+ )["noise_pred"]
909
+ noise_pred = torch.from_numpy(noise_pred).to(device)
910
+
911
+ # perform guidance
912
+ if do_classifier_free_guidance:
913
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
914
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
915
+
916
+ # compute the previous noisy sample x_t -> x_t-1
917
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
918
+
919
+ # call the callback, if provided
920
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
921
+ progress_bar.update()
922
+ if callback is not None and i % callback_steps == 0:
923
+ step_idx = i // getattr(self.scheduler, "order", 1)
924
+ callback(step_idx, t, latents)
925
+
926
+ if not output_type == "latent":
927
+ _latents = latents.cpu().detach().numpy() / 0.18215
928
+ _latents = np.array(_latents, dtype=np_dtype)
929
+ image = self.vae_decoder(latent_sample=_latents)[0]
930
+ image = torch.from_numpy(image).to(device, dtype=torch.float32)
931
+ has_nsfw_concept = None
932
+ else:
933
+ image = latents
934
+ has_nsfw_concept = None
935
+
936
+ if has_nsfw_concept is None:
937
+ do_denormalize = [True] * image.shape[0]
938
+ else:
939
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
940
+
941
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
942
+
943
+ if not return_dict:
944
+ return (image, has_nsfw_concept)
945
+
946
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
947
+
948
+
949
+ if __name__ == "__main__":
950
+ parser = argparse.ArgumentParser()
951
+
952
+ parser.add_argument(
953
+ "--sd_model",
954
+ type=str,
955
+ required=True,
956
+ help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
957
+ )
958
+
959
+ parser.add_argument(
960
+ "--onnx_model_dir",
961
+ type=str,
962
+ required=True,
963
+ help="Path to the ONNX directory",
964
+ )
965
+
966
+ parser.add_argument(
967
+ "--unet_engine_path",
968
+ type=str,
969
+ required=True,
970
+ help="Path to the unet + controlnet tensorrt model",
971
+ )
972
+
973
+ parser.add_argument("--qr_img_path", type=str, required=True, help="Path to the qr code image")
974
+
975
+ args = parser.parse_args()
976
+
977
+ qr_image = Image.open(args.qr_img_path)
978
+ qr_image = qr_image.resize((512, 512))
979
+
980
+ # init stable diffusion pipeline
981
+ pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(args.sd_model)
982
+ pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
983
+
984
+ provider = ["CUDAExecutionProvider", "CPUExecutionProvider"]
985
+ onnx_pipeline = TensorRTStableDiffusionControlNetImg2ImgPipeline(
986
+ vae_encoder=OnnxRuntimeModel.from_pretrained(
987
+ os.path.join(args.onnx_model_dir, "vae_encoder"), provider=provider
988
+ ),
989
+ vae_decoder=OnnxRuntimeModel.from_pretrained(
990
+ os.path.join(args.onnx_model_dir, "vae_decoder"), provider=provider
991
+ ),
992
+ text_encoder=OnnxRuntimeModel.from_pretrained(
993
+ os.path.join(args.onnx_model_dir, "text_encoder"), provider=provider
994
+ ),
995
+ tokenizer=pipeline.tokenizer,
996
+ unet=TensorRTModel(args.unet_engine_path),
997
+ scheduler=pipeline.scheduler,
998
+ )
999
+ onnx_pipeline = onnx_pipeline.to("cuda")
1000
+
1001
+ prompt = "a cute cat fly to the moon"
1002
+ negative_prompt = "paintings, sketches, worst quality, low quality, normal quality, lowres, normal quality, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, glans, nsfw, nipples, necklace, worst quality, low quality, watermark, username, signature, multiple breasts, lowres, bad anatomy, bad hands, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, bad feet, single color, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, disfigured, bad anatomy, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, mutated hands, fused fingers, too many fingers, long neck, bad body perspect"
1003
+
1004
+ for i in range(10):
1005
+ start_time = time.time()
1006
+ image = onnx_pipeline(
1007
+ num_controlnet=2,
1008
+ prompt=prompt,
1009
+ negative_prompt=negative_prompt,
1010
+ image=qr_image,
1011
+ control_image=[qr_image, qr_image],
1012
+ width=512,
1013
+ height=512,
1014
+ strength=0.75,
1015
+ num_inference_steps=20,
1016
+ num_images_per_prompt=1,
1017
+ controlnet_conditioning_scale=[0.8, 0.8],
1018
+ control_guidance_start=[0.3, 0.3],
1019
+ control_guidance_end=[0.9, 0.9],
1020
+ ).images[0]
1021
+ print(time.time() - start_time)
1022
+ image.save("output_qr_code.png")
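# One way the `__main__` block above might be invoked (the script name and all paths are
# illustrative placeholders; the flags are the ones defined by argparse above):
#     python run_tensorrt_controlnet.py \
#         --sd_model runwayml/stable-diffusion-v1-5 \
#         --onnx_model_dir ./onnx_models \
#         --unet_engine_path ./unet_controlnet.engine \
#         --qr_img_path ./qr_code.png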
v0.26.3/scheduling_ufogen.py ADDED
@@ -0,0 +1,525 @@
1
+ # Copyright 2023 UC Berkeley Team and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
16
+
17
+ import math
18
+ from dataclasses import dataclass
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+
24
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
25
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
26
+ from diffusers.utils import BaseOutput
27
+ from diffusers.utils.torch_utils import randn_tensor
28
+
29
+
30
+ @dataclass
31
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UFOGen
32
+ class UFOGenSchedulerOutput(BaseOutput):
33
+ """
34
+ Output class for the scheduler's `step` function output.
35
+
36
+ Args:
37
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
38
+ Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
39
+ denoising loop.
40
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
41
+ The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
42
+ `pred_original_sample` can be used to preview progress or for guidance.
43
+ """
44
+
45
+ prev_sample: torch.FloatTensor
46
+ pred_original_sample: Optional[torch.FloatTensor] = None
47
+
48
+
49
+ # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
50
+ def betas_for_alpha_bar(
51
+ num_diffusion_timesteps,
52
+ max_beta=0.999,
53
+ alpha_transform_type="cosine",
54
+ ):
55
+ """
56
+ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
57
+ (1-beta) over time from t = [0,1].
58
+
59
+ Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
60
+ to that part of the diffusion process.
61
+
62
+
63
+ Args:
64
+ num_diffusion_timesteps (`int`): the number of betas to produce.
65
+ max_beta (`float`): the maximum beta to use; use values lower than 1 to
66
+ prevent singularities.
67
+ alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
68
+ Choose from `cosine` or `exp`
69
+
70
+ Returns:
71
+ betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
72
+ """
73
+ if alpha_transform_type == "cosine":
74
+
75
+ def alpha_bar_fn(t):
76
+ return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
77
+
78
+ elif alpha_transform_type == "exp":
79
+
80
+ def alpha_bar_fn(t):
81
+ return math.exp(t * -12.0)
82
+
83
+ else:
84
+ raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
85
+
86
+ betas = []
87
+ for i in range(num_diffusion_timesteps):
88
+ t1 = i / num_diffusion_timesteps
89
+ t2 = (i + 1) / num_diffusion_timesteps
90
+ betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
91
+ return torch.tensor(betas, dtype=torch.float32)
92
+
93
+
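# The cosine ("squaredcos_cap_v2") schedule above implements, for t_i = i / N:
#     alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
#     beta_i       = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta)
# A quick illustrative sanity check:
#     betas = betas_for_alpha_bar(10)
#     betas.shape -> torch.Size([10]); every value lies in [0, 0.999]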
94
+ # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
95
+ def rescale_zero_terminal_snr(betas):
96
+ """
97
+ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
98
+
99
+
100
+ Args:
101
+ betas (`torch.FloatTensor`):
102
+ the betas that the scheduler is being initialized with.
103
+
104
+ Returns:
105
+ `torch.FloatTensor`: rescaled betas with zero terminal SNR
106
+ """
107
+ # Convert betas to alphas_bar_sqrt
108
+ alphas = 1.0 - betas
109
+ alphas_cumprod = torch.cumprod(alphas, dim=0)
110
+ alphas_bar_sqrt = alphas_cumprod.sqrt()
111
+
112
+ # Store old values.
113
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
114
+ alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
115
+
116
+ # Shift so the last timestep is zero.
117
+ alphas_bar_sqrt -= alphas_bar_sqrt_T
118
+
119
+ # Scale so the first timestep is back to the old value.
120
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
121
+
122
+ # Convert alphas_bar_sqrt to betas
123
+ alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
124
+ alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
125
+ alphas = torch.cat([alphas_bar[0:1], alphas])
126
+ betas = 1 - alphas
127
+
128
+ return betas
129
+
130
+
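# Illustrative check (assuming the function above is in scope) that the rescaled schedule
# really ends with zero terminal SNR, i.e. the final cumulative alpha is 0:
#     betas = torch.linspace(0.0001, 0.02, 1000)
#     alphas_bar = torch.cumprod(1.0 - rescale_zero_terminal_snr(betas), dim=0)
#     float(alphas_bar[-1])  -> 0.0, so SNR = alphas_bar / (1 - alphas_bar) vanishes at T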
131
+ class UFOGenScheduler(SchedulerMixin, ConfigMixin):
132
+ """
133
+ `UFOGenScheduler` implements multistep and onestep sampling for a UFOGen model, introduced in
134
+ [UFOGen: You Forward Once Large Scale Text-to-Image Generation via Diffusion GANs](https://arxiv.org/abs/2311.09257)
135
+ by Yanwu Xu, Yang Zhao, Zhisheng Xiao, and Tingbo Hou. UFOGen is a variant of the denoising diffusion GAN (DDGAN)
136
+ model designed for one-step sampling.
137
+
138
+ This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
139
+ methods the library implements for all schedulers such as loading and saving.
140
+
141
+ Args:
142
+ num_train_timesteps (`int`, defaults to 1000):
143
+ The number of diffusion steps to train the model.
144
+ beta_start (`float`, defaults to 0.0001):
145
+ The starting `beta` value of inference.
146
+ beta_end (`float`, defaults to 0.02):
147
+ The final `beta` value.
148
+ beta_schedule (`str`, defaults to `"linear"`):
149
+ The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
150
+ `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
151
+ clip_sample (`bool`, defaults to `True`):
152
+ Clip the predicted sample for numerical stability.
153
+ clip_sample_range (`float`, defaults to 1.0):
154
+ The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
155
+ set_alpha_to_one (`bool`, defaults to `True`):
156
+ Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
157
+ there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
158
+ otherwise it uses the alpha value at step 0.
159
+ prediction_type (`str`, defaults to `epsilon`, *optional*):
160
+ Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
161
+ `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
162
+ Video](https://imagen.research.google/video/paper.pdf) paper).
163
+ thresholding (`bool`, defaults to `False`):
164
+ Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
165
+ as Stable Diffusion.
166
+ dynamic_thresholding_ratio (`float`, defaults to 0.995):
167
+ The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
168
+ sample_max_value (`float`, defaults to 1.0):
169
+ The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
170
+ timestep_spacing (`str`, defaults to `"leading"`):
171
+ The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
172
+ Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
173
+ steps_offset (`int`, defaults to 0):
174
+ An offset added to the inference steps. You can use a combination of `offset=1` and
175
+ `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable
176
+ Diffusion.
177
+ rescale_betas_zero_snr (`bool`, defaults to `False`):
178
+ Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
179
+ dark samples instead of limiting it to samples with medium brightness. Loosely related to
180
+ [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
181
+ denoising_step_size (`int`, defaults to 250):
182
+ The denoising step size parameter from the UFOGen paper. The number of steps used for training is roughly
183
+ `math.ceil(num_train_timesteps / denoising_step_size)`.
184
+ """
185
+
186
+ order = 1
187
+
188
+ @register_to_config
189
+ def __init__(
190
+ self,
191
+ num_train_timesteps: int = 1000,
192
+ beta_start: float = 0.0001,
193
+ beta_end: float = 0.02,
194
+ beta_schedule: str = "linear",
195
+ trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
196
+ clip_sample: bool = True,
197
+ set_alpha_to_one: bool = True,
198
+ prediction_type: str = "epsilon",
199
+ thresholding: bool = False,
200
+ dynamic_thresholding_ratio: float = 0.995,
201
+ clip_sample_range: float = 1.0,
202
+ sample_max_value: float = 1.0,
203
+ timestep_spacing: str = "leading",
204
+ steps_offset: int = 0,
205
+ rescale_betas_zero_snr: bool = False,
206
+ denoising_step_size: int = 250,
207
+ ):
208
+ if trained_betas is not None:
209
+ self.betas = torch.tensor(trained_betas, dtype=torch.float32)
210
+ elif beta_schedule == "linear":
211
+ self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
212
+ elif beta_schedule == "scaled_linear":
213
+ # this schedule is very specific to the latent diffusion model.
214
+ self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
215
+ elif beta_schedule == "squaredcos_cap_v2":
216
+ # Glide cosine schedule
217
+ self.betas = betas_for_alpha_bar(num_train_timesteps)
218
+ elif beta_schedule == "sigmoid":
219
+ # GeoDiff sigmoid schedule
220
+ betas = torch.linspace(-6, 6, num_train_timesteps)
221
+ self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start
222
+ else:
223
+ raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
224
+
225
+ # Rescale for zero SNR
226
+ if rescale_betas_zero_snr:
227
+ self.betas = rescale_zero_terminal_snr(self.betas)
228
+
229
+ self.alphas = 1.0 - self.betas
230
+ self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
231
+
232
+ # For the final step, there is no previous alphas_cumprod because we are already at 0
233
+ # `set_alpha_to_one` decides whether we set this parameter simply to one or
234
+ # whether we use the final alpha of the "non-previous" one.
235
+ self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
236
+
237
+ # standard deviation of the initial noise distribution
238
+ self.init_noise_sigma = 1.0
239
+
240
+ # setable values
241
+ self.custom_timesteps = False
242
+ self.num_inference_steps = None
243
+ self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
244
+
245
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
246
+ """
247
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
248
+ current timestep.
249
+
250
+ Args:
251
+ sample (`torch.FloatTensor`):
252
+ The input sample.
253
+ timestep (`int`, *optional*):
254
+ The current timestep in the diffusion chain.
255
+
256
+ Returns:
257
+ `torch.FloatTensor`:
258
+ A scaled input sample.
259
+ """
260
+ return sample
261
+
262
+ def set_timesteps(
263
+ self,
264
+ num_inference_steps: Optional[int] = None,
265
+ device: Union[str, torch.device] = None,
266
+ timesteps: Optional[List[int]] = None,
267
+ ):
268
+ """
269
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
270
+
271
+ Args:
272
+ num_inference_steps (`int`):
273
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
274
+ `timesteps` must be `None`.
275
+ device (`str` or `torch.device`, *optional*):
276
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
277
+ timesteps (`List[int]`, *optional*):
278
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
279
+ timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed,
280
+ `num_inference_steps` must be `None`.
281
+
282
+ """
283
+ if num_inference_steps is not None and timesteps is not None:
284
+ raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.")
285
+
286
+ if timesteps is not None:
287
+ for i in range(1, len(timesteps)):
288
+ if timesteps[i] >= timesteps[i - 1]:
289
+ raise ValueError("`custom_timesteps` must be in descending order.")
290
+
291
+ if timesteps[0] >= self.config.num_train_timesteps:
292
+ raise ValueError(
293
+ f"`timesteps` must start before `self.config.train_timesteps`:"
294
+ f" {self.config.num_train_timesteps}."
295
+ )
296
+
297
+ timesteps = np.array(timesteps, dtype=np.int64)
298
+ self.custom_timesteps = True
299
+ else:
300
+ if num_inference_steps > self.config.num_train_timesteps:
301
+ raise ValueError(
302
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
303
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
304
+ f" maximal {self.config.num_train_timesteps} timesteps."
305
+ )
306
+
307
+ self.num_inference_steps = num_inference_steps
308
+ self.custom_timesteps = False
309
+
310
+ # TODO: For now, handle special case when num_inference_steps == 1 separately
311
+ if num_inference_steps == 1:
312
+ # Set the timestep schedule to num_train_timesteps - 1 rather than 0
313
+ # (that is, the one-step timestep schedule is always trailing rather than leading or linspace)
314
+ timesteps = np.array([self.config.num_train_timesteps - 1], dtype=np.int64)
315
+ else:
316
+ # TODO: For now, retain the DDPM timestep spacing logic
317
+ # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
318
+ if self.config.timestep_spacing == "linspace":
319
+ timesteps = (
320
+ np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps)
321
+ .round()[::-1]
322
+ .copy()
323
+ .astype(np.int64)
324
+ )
325
+ elif self.config.timestep_spacing == "leading":
326
+ step_ratio = self.config.num_train_timesteps // self.num_inference_steps
327
+ # creates integer timesteps by multiplying by ratio
328
+ # casting to int to avoid issues when num_inference_step is power of 3
329
+ timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
330
+ timesteps += self.config.steps_offset
331
+ elif self.config.timestep_spacing == "trailing":
332
+ step_ratio = self.config.num_train_timesteps / self.num_inference_steps
333
+ # creates integer timesteps by multiplying by ratio
334
+ # casting to int to avoid issues when num_inference_step is power of 3
335
+ timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64)
336
+ timesteps -= 1
337
+ else:
338
+ raise ValueError(
339
+ f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
340
+ )
341
+
342
+ self.timesteps = torch.from_numpy(timesteps).to(device)
343
+
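# Worked example of the spacing strategies above for num_train_timesteps=1000 and
# num_inference_steps=4 (steps_offset=0):
#     "linspace": [999, 666, 333,   0]
#     "leading":  [750, 500, 250,   0]
#     "trailing": [999, 749, 499, 249]
# and set_timesteps(1) always yields the single trailing timestep [999].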
344
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
345
+ def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
346
+ """
347
+ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
348
+ prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
349
+ s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
350
+ pixels from saturation at each step. We find that dynamic thresholding results in significantly better
351
+ photorealism as well as better image-text alignment, especially when using very large guidance weights."
352
+
353
+ https://arxiv.org/abs/2205.11487
354
+ """
355
+ dtype = sample.dtype
356
+ batch_size, channels, *remaining_dims = sample.shape
357
+
358
+ if dtype not in (torch.float32, torch.float64):
359
+ sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
360
+
361
+ # Flatten sample for doing quantile calculation along each image
362
+ sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
363
+
364
+ abs_sample = sample.abs() # "a certain percentile absolute pixel value"
365
+
366
+ s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
367
+ s = torch.clamp(
368
+ s, min=1, max=self.config.sample_max_value
369
+ ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
370
+ s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
371
+ sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
372
+
373
+ sample = sample.reshape(batch_size, channels, *remaining_dims)
374
+ sample = sample.to(dtype)
375
+
376
+ return sample
377
+
378
+ def step(
379
+ self,
380
+ model_output: torch.FloatTensor,
381
+ timestep: int,
382
+ sample: torch.FloatTensor,
383
+ generator: Optional[torch.Generator] = None,
384
+ return_dict: bool = True,
385
+ ) -> Union[UFOGenSchedulerOutput, Tuple]:
386
+ """
387
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
388
+ process from the learned model outputs (most often the predicted noise).
389
+
390
+ Args:
391
+ model_output (`torch.FloatTensor`):
392
+ The direct output from learned diffusion model.
393
+ timestep (`int`):
394
+ The current discrete timestep in the diffusion chain.
395
+ sample (`torch.FloatTensor`):
396
+ A current instance of a sample created by the diffusion process.
397
+ generator (`torch.Generator`, *optional*):
398
+ A random number generator.
399
+ return_dict (`bool`, *optional*, defaults to `True`):
400
+ Whether or not to return a [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] or `tuple`.
401
+
402
+ Returns:
403
+ [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] or `tuple`:
404
+ If return_dict is `True`, [`~schedulers.scheduling_ufogen.UFOGenSchedulerOutput`] is returned, otherwise a
405
+ tuple is returned where the first element is the sample tensor.
406
+
407
+ """
408
+ # 0. Resolve timesteps
409
+ t = timestep
410
+ prev_t = self.previous_timestep(t)
411
+
412
+ # 1. compute alphas, betas
413
+ alpha_prod_t = self.alphas_cumprod[t]
414
+ alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.final_alpha_cumprod
415
+ beta_prod_t = 1 - alpha_prod_t
416
+ # beta_prod_t_prev = 1 - alpha_prod_t_prev
417
+ # current_alpha_t = alpha_prod_t / alpha_prod_t_prev
418
+ # current_beta_t = 1 - current_alpha_t
419
+
420
+ # 2. compute predicted original sample from predicted noise also called
421
+ # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
422
+ if self.config.prediction_type == "epsilon":
423
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
424
+ elif self.config.prediction_type == "sample":
425
+ pred_original_sample = model_output
426
+ elif self.config.prediction_type == "v_prediction":
427
+ pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
428
+ else:
429
+ raise ValueError(
430
+ f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
431
+ " `v_prediction` for UFOGenScheduler."
432
+ )
433
+
434
+ # 3. Clip or threshold "predicted x_0"
435
+ if self.config.thresholding:
436
+ pred_original_sample = self._threshold_sample(pred_original_sample)
437
+ elif self.config.clip_sample:
438
+ pred_original_sample = pred_original_sample.clamp(
439
+ -self.config.clip_sample_range, self.config.clip_sample_range
440
+ )
441
+
442
+ # 4. Single-step or multi-step sampling
443
+ # Noise is not used on the final timestep of the timestep schedule.
444
+ # This also means that noise is not used for one-step sampling.
445
+ if t != self.timesteps[-1]:
446
+ # TODO: is this correct?
447
+ # Sample prev sample x_{t - 1} ~ q(x_{t - 1} | x_0 = G(x_t, t))
448
+ device = model_output.device
449
+ noise = randn_tensor(model_output.shape, generator=generator, device=device, dtype=model_output.dtype)
450
+ sqrt_alpha_prod_t_prev = alpha_prod_t_prev**0.5
451
+ sqrt_one_minus_alpha_prod_t_prev = (1 - alpha_prod_t_prev) ** 0.5
452
+ pred_prev_sample = sqrt_alpha_prod_t_prev * pred_original_sample + sqrt_one_minus_alpha_prod_t_prev * noise
453
+ else:
454
+ # Simply return the pred_original_sample. If `prediction_type == "sample"`, this is equivalent to returning
455
+ # the output of the GAN generator U-Net on the initial noisy latents x_T ~ N(0, I).
456
+ pred_prev_sample = pred_original_sample
457
+
458
+ if not return_dict:
459
+ return (pred_prev_sample,)
460
+
461
+ return UFOGenSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
462
+
463
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
464
+ def add_noise(
465
+ self,
466
+ original_samples: torch.FloatTensor,
467
+ noise: torch.FloatTensor,
468
+ timesteps: torch.IntTensor,
469
+ ) -> torch.FloatTensor:
470
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
471
+ alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
472
+ timesteps = timesteps.to(original_samples.device)
473
+
474
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
475
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
476
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
477
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
478
+
479
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
480
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
481
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
482
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
483
+
484
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
485
+ return noisy_samples
486
+
487
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
488
+ def get_velocity(
489
+ self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
490
+ ) -> torch.FloatTensor:
491
+ # Make sure alphas_cumprod and timestep have same device and dtype as sample
492
+ alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
493
+ timesteps = timesteps.to(sample.device)
494
+
495
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
496
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
497
+ while len(sqrt_alpha_prod.shape) < len(sample.shape):
498
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
499
+
500
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
501
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
502
+ while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
503
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
504
+
505
+ velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
506
+ return velocity
507
+
508
+ def __len__(self):
509
+ return self.config.num_train_timesteps
510
+
511
+ # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep
512
+ def previous_timestep(self, timestep):
513
+ if self.custom_timesteps:
514
+ index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0]
515
+ if index == self.timesteps.shape[0] - 1:
516
+ prev_t = torch.tensor(-1)
517
+ else:
518
+ prev_t = self.timesteps[index + 1]
519
+ else:
520
+ num_inference_steps = (
521
+ self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps
522
+ )
523
+ prev_t = timestep - self.config.num_train_timesteps // num_inference_steps
524
+
525
+ return prev_t
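The `step` method above boils down to a simple rule: reconstruct the predicted `x_0` from the model output, then either return it directly (final timestep, which is also the one-step sampling path) or re-noise it to the previous timestep. A minimal standalone sketch of that rule for the `"epsilon"` prediction type, using an assumed linear beta schedule and plain tensors rather than the scheduler class itself:

```py
# Hedged sketch of the UFOGen-style sampling rule above (assumptions: linear betas,
# epsilon prediction, and prev_t < 0 standing in for "this is the final timestep").
import torch

num_train_timesteps = 1000
betas = torch.linspace(1e-4, 0.02, num_train_timesteps)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def ufogen_like_step(model_output, sample, t, prev_t, generator=None):
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    beta_prod_t = 1 - alpha_prod_t

    # "predicted x_0" from the epsilon parameterization
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    if prev_t < 0:
        # final (or only) step: return the x_0 prediction directly, no noise added
        return pred_original_sample

    # multi-step sampling: draw x_{t-1} ~ q(x_{t-1} | x_0 = pred_original_sample)
    noise = torch.randn(sample.shape, generator=generator, dtype=sample.dtype)
    return alpha_prod_t_prev**0.5 * pred_original_sample + (1 - alpha_prod_t_prev) ** 0.5 * noise

# toy call on random tensors
x_t = torch.randn(1, 4, 8, 8)
eps_pred = torch.randn(1, 4, 8, 8)
x_prev = ufogen_like_step(eps_pred, x_t, t=999, prev_t=499)
```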
v0.26.3/sd_text2img_k_diffusion.py ADDED
@@ -0,0 +1,476 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import warnings
17
+ from typing import Callable, List, Optional, Union
18
+
19
+ import torch
20
+ from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
21
+
22
+ from diffusers import DiffusionPipeline, LMSDiscreteScheduler
23
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
24
+ from diffusers.utils import is_accelerate_available, logging
25
+
26
+
27
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
28
+
29
+
30
+ class ModelWrapper:
31
+ def __init__(self, model, alphas_cumprod):
32
+ self.model = model
33
+ self.alphas_cumprod = alphas_cumprod
34
+
35
+ def apply_model(self, *args, **kwargs):
36
+ if len(args) == 3:
37
+ encoder_hidden_states = args[-1]
38
+ args = args[:2]
39
+ if kwargs.get("cond", None) is not None:
40
+ encoder_hidden_states = kwargs.pop("cond")
41
+ return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample
42
+
43
+
44
+ class StableDiffusionPipeline(DiffusionPipeline):
45
+ r"""
46
+ Pipeline for text-to-image generation using Stable Diffusion.
47
+
48
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
49
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
50
+
51
+ Args:
52
+ vae ([`AutoencoderKL`]):
53
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
54
+ text_encoder ([`CLIPTextModel`]):
55
+ Frozen text-encoder. Stable Diffusion uses the text portion of
56
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
57
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
58
+ tokenizer (`CLIPTokenizer`):
59
+ Tokenizer of class
60
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
61
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
62
+ scheduler ([`SchedulerMixin`]):
63
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
64
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
65
+ safety_checker ([`StableDiffusionSafetyChecker`]):
66
+ Classification module that estimates whether generated images could be considered offensive or harmful.
67
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
68
+ feature_extractor ([`CLIPImageProcessor`]):
69
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
70
+ """
71
+
72
+ _optional_components = ["safety_checker", "feature_extractor"]
73
+
74
+ def __init__(
75
+ self,
76
+ vae,
77
+ text_encoder,
78
+ tokenizer,
79
+ unet,
80
+ scheduler,
81
+ safety_checker,
82
+ feature_extractor,
83
+ ):
84
+ super().__init__()
85
+
86
+ if safety_checker is None:
87
+ logger.warning(
88
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
89
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
90
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
91
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
92
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
93
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
94
+ )
95
+
96
+ # get correct sigmas from LMS
97
+ scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
98
+ self.register_modules(
99
+ vae=vae,
100
+ text_encoder=text_encoder,
101
+ tokenizer=tokenizer,
102
+ unet=unet,
103
+ scheduler=scheduler,
104
+ safety_checker=safety_checker,
105
+ feature_extractor=feature_extractor,
106
+ )
107
+
108
+ model = ModelWrapper(unet, scheduler.alphas_cumprod)
109
+ if scheduler.config.prediction_type == "v_prediction":
110
+ self.k_diffusion_model = CompVisVDenoiser(model)
111
+ else:
112
+ self.k_diffusion_model = CompVisDenoiser(model)
113
+
114
+ def set_sampler(self, scheduler_type: str):
115
+ warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.")
116
+ return self.set_scheduler(scheduler_type)
117
+
118
+ def set_scheduler(self, scheduler_type: str):
119
+ library = importlib.import_module("k_diffusion")
120
+ sampling = getattr(library, "sampling")
121
+ self.sampler = getattr(sampling, scheduler_type)
122
+
123
+ def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
124
+ r"""
125
+ Enable sliced attention computation.
126
+
127
+ When this option is enabled, the attention module will split the input tensor in slices, to compute attention
128
+ in several steps. This is useful to save some memory in exchange for a small speed decrease.
129
+
130
+ Args:
131
+ slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
132
+ When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
133
+ a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
134
+ `attention_head_dim` must be a multiple of `slice_size`.
135
+ """
136
+ if slice_size == "auto":
137
+ # half the attention head size is usually a good trade-off between
138
+ # speed and memory
139
+ slice_size = self.unet.config.attention_head_dim // 2
140
+ self.unet.set_attention_slice(slice_size)
141
+
142
+ def disable_attention_slicing(self):
143
+ r"""
144
+ Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
145
+ back to computing attention in one step.
146
+ """
147
+ # set slice_size = `None` to disable `attention slicing`
148
+ self.enable_attention_slicing(None)
149
+
150
+ def enable_sequential_cpu_offload(self, gpu_id=0):
151
+ r"""
152
+ Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
153
+ text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
154
+ `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
155
+ """
156
+ if is_accelerate_available():
157
+ from accelerate import cpu_offload
158
+ else:
159
+ raise ImportError("Please install accelerate via `pip install accelerate`")
160
+
161
+ device = torch.device(f"cuda:{gpu_id}")
162
+
163
+ for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
164
+ if cpu_offloaded_model is not None:
165
+ cpu_offload(cpu_offloaded_model, device)
166
+
167
+ @property
168
+ def _execution_device(self):
169
+ r"""
170
+ Returns the device on which the pipeline's models will be executed. After calling
171
+ `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
172
+ hooks.
173
+ """
174
+ if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
175
+ return self.device
176
+ for module in self.unet.modules():
177
+ if (
178
+ hasattr(module, "_hf_hook")
179
+ and hasattr(module._hf_hook, "execution_device")
180
+ and module._hf_hook.execution_device is not None
181
+ ):
182
+ return torch.device(module._hf_hook.execution_device)
183
+ return self.device
184
+
185
+ def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
186
+ r"""
187
+ Encodes the prompt into text encoder hidden states.
188
+
189
+ Args:
190
+ prompt (`str` or `list(int)`):
191
+ prompt to be encoded
192
+ device: (`torch.device`):
193
+ torch device
194
+ num_images_per_prompt (`int`):
195
+ number of images that should be generated per prompt
196
+ do_classifier_free_guidance (`bool`):
197
+ whether to use classifier free guidance or not
198
+ negative_prompt (`str` or `List[str]`):
199
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
200
+ if `guidance_scale` is less than `1`).
201
+ """
202
+ batch_size = len(prompt) if isinstance(prompt, list) else 1
203
+
204
+ text_inputs = self.tokenizer(
205
+ prompt,
206
+ padding="max_length",
207
+ max_length=self.tokenizer.model_max_length,
208
+ truncation=True,
209
+ return_tensors="pt",
210
+ )
211
+ text_input_ids = text_inputs.input_ids
212
+ untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
213
+
214
+ if not torch.equal(text_input_ids, untruncated_ids):
215
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
216
+ logger.warning(
217
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
218
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
219
+ )
220
+
221
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
222
+ attention_mask = text_inputs.attention_mask.to(device)
223
+ else:
224
+ attention_mask = None
225
+
226
+ text_embeddings = self.text_encoder(
227
+ text_input_ids.to(device),
228
+ attention_mask=attention_mask,
229
+ )
230
+ text_embeddings = text_embeddings[0]
231
+
232
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
233
+ bs_embed, seq_len, _ = text_embeddings.shape
234
+ text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
235
+ text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
236
+
237
+ # get unconditional embeddings for classifier free guidance
238
+ if do_classifier_free_guidance:
239
+ uncond_tokens: List[str]
240
+ if negative_prompt is None:
241
+ uncond_tokens = [""] * batch_size
242
+ elif type(prompt) is not type(negative_prompt):
243
+ raise TypeError(
244
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
245
+ f" {type(prompt)}."
246
+ )
247
+ elif isinstance(negative_prompt, str):
248
+ uncond_tokens = [negative_prompt]
249
+ elif batch_size != len(negative_prompt):
250
+ raise ValueError(
251
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
252
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
253
+ " the batch size of `prompt`."
254
+ )
255
+ else:
256
+ uncond_tokens = negative_prompt
257
+
258
+ max_length = text_input_ids.shape[-1]
259
+ uncond_input = self.tokenizer(
260
+ uncond_tokens,
261
+ padding="max_length",
262
+ max_length=max_length,
263
+ truncation=True,
264
+ return_tensors="pt",
265
+ )
266
+
267
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
268
+ attention_mask = uncond_input.attention_mask.to(device)
269
+ else:
270
+ attention_mask = None
271
+
272
+ uncond_embeddings = self.text_encoder(
273
+ uncond_input.input_ids.to(device),
274
+ attention_mask=attention_mask,
275
+ )
276
+ uncond_embeddings = uncond_embeddings[0]
277
+
278
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
279
+ seq_len = uncond_embeddings.shape[1]
280
+ uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
281
+ uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
282
+
283
+ # For classifier free guidance, we need to do two forward passes.
284
+ # Here we concatenate the unconditional and text embeddings into a single batch
285
+ # to avoid doing two forward passes
286
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
287
+
288
+ return text_embeddings
289
+
290
+ def run_safety_checker(self, image, device, dtype):
291
+ if self.safety_checker is not None:
292
+ safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
293
+ image, has_nsfw_concept = self.safety_checker(
294
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
295
+ )
296
+ else:
297
+ has_nsfw_concept = None
298
+ return image, has_nsfw_concept
299
+
300
+ def decode_latents(self, latents):
301
+ latents = 1 / 0.18215 * latents
302
+ image = self.vae.decode(latents).sample
303
+ image = (image / 2 + 0.5).clamp(0, 1)
304
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
305
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
306
+ return image
307
+
308
+ def check_inputs(self, prompt, height, width, callback_steps):
309
+ if not isinstance(prompt, str) and not isinstance(prompt, list):
310
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
311
+
312
+ if height % 8 != 0 or width % 8 != 0:
313
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
314
+
315
+ if (callback_steps is None) or (
316
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
317
+ ):
318
+ raise ValueError(
319
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
320
+ f" {type(callback_steps)}."
321
+ )
322
+
323
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
324
+ shape = (batch_size, num_channels_latents, height // 8, width // 8)
325
+ if latents is None:
326
+ if device.type == "mps":
327
+ # randn does not work reproducibly on mps
328
+ latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
329
+ else:
330
+ latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
331
+ else:
332
+ if latents.shape != shape:
333
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
334
+ latents = latents.to(device)
335
+
336
+ # scaling of the initial noise by the scheduler's first sigma is done in `__call__` (latents = latents * sigmas[0])
337
+ return latents
338
+
339
+ @torch.no_grad()
340
+ def __call__(
341
+ self,
342
+ prompt: Union[str, List[str]],
343
+ height: int = 512,
344
+ width: int = 512,
345
+ num_inference_steps: int = 50,
346
+ guidance_scale: float = 7.5,
347
+ negative_prompt: Optional[Union[str, List[str]]] = None,
348
+ num_images_per_prompt: Optional[int] = 1,
349
+ eta: float = 0.0,
350
+ generator: Optional[torch.Generator] = None,
351
+ latents: Optional[torch.FloatTensor] = None,
352
+ output_type: Optional[str] = "pil",
353
+ return_dict: bool = True,
354
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
355
+ callback_steps: int = 1,
356
+ **kwargs,
357
+ ):
358
+ r"""
359
+ Function invoked when calling the pipeline for generation.
360
+
361
+ Args:
362
+ prompt (`str` or `List[str]`):
363
+ The prompt or prompts to guide the image generation.
364
+ height (`int`, *optional*, defaults to 512):
365
+ The height in pixels of the generated image.
366
+ width (`int`, *optional*, defaults to 512):
367
+ The width in pixels of the generated image.
368
+ num_inference_steps (`int`, *optional*, defaults to 50):
369
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
370
+ expense of slower inference.
371
+ guidance_scale (`float`, *optional*, defaults to 7.5):
372
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
373
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
374
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
375
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
376
+ usually at the expense of lower image quality.
377
+ negative_prompt (`str` or `List[str]`, *optional*):
378
+ The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
379
+ if `guidance_scale` is less than `1`).
380
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
381
+ The number of images to generate per prompt.
382
+ eta (`float`, *optional*, defaults to 0.0):
383
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
384
+ [`schedulers.DDIMScheduler`], will be ignored for others.
385
+ generator (`torch.Generator`, *optional*):
386
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
387
+ deterministic.
388
+ latents (`torch.FloatTensor`, *optional*):
389
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
390
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
391
+ tensor will be generated by sampling using the supplied random `generator`.
392
+ output_type (`str`, *optional*, defaults to `"pil"`):
393
+ The output format of the generated image. Choose between
394
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
395
+ return_dict (`bool`, *optional*, defaults to `True`):
396
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
397
+ plain tuple.
398
+ callback (`Callable`, *optional*):
399
+ A function that will be called every `callback_steps` steps during inference. The function will be
400
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
401
+ callback_steps (`int`, *optional*, defaults to 1):
402
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
403
+ called at every step.
404
+
405
+ Returns:
406
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
407
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is `True`, otherwise a `tuple`.
408
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
409
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
410
+ (nsfw) content, according to the `safety_checker`.
411
+ """
412
+
413
+ # 1. Check inputs. Raise error if not correct
414
+ self.check_inputs(prompt, height, width, callback_steps)
415
+
416
+ # 2. Define call parameters
417
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
418
+ device = self._execution_device
419
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
420
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
421
+ # corresponds to doing no classifier free guidance.
422
+ do_classifier_free_guidance = True
423
+ if guidance_scale <= 1.0:
424
+ raise ValueError("`guidance_scale` has to be > 1.0 since this pipeline always uses classifier-free guidance")
425
+
426
+ # 3. Encode input prompt
427
+ text_embeddings = self._encode_prompt(
428
+ prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
429
+ )
430
+
431
+ # 4. Prepare timesteps
432
+ self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device)
433
+ sigmas = self.scheduler.sigmas
434
+ sigmas = sigmas.to(text_embeddings.dtype)
435
+
436
+ # 5. Prepare latent variables
437
+ num_channels_latents = self.unet.config.in_channels
438
+ latents = self.prepare_latents(
439
+ batch_size * num_images_per_prompt,
440
+ num_channels_latents,
441
+ height,
442
+ width,
443
+ text_embeddings.dtype,
444
+ device,
445
+ generator,
446
+ latents,
447
+ )
448
+ latents = latents * sigmas[0]
449
+ self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
450
+ self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device)
451
+
452
+ def model_fn(x, t):
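+ # model_fn is the denoiser handed to the k-diffusion sampler: it batches the unconditional
+ # and text-conditional branches together and mixes them with classifier-free guidance.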
453
+ latent_model_input = torch.cat([x] * 2)
454
+
455
+ noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings)
456
+
457
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
458
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
459
+ return noise_pred
460
+
461
+ latents = self.sampler(model_fn, latents, sigmas)
462
+
463
+ # 8. Post-processing
464
+ image = self.decode_latents(latents)
465
+
466
+ # 9. Run safety checker
467
+ image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
468
+
469
+ # 10. Convert to PIL
470
+ if output_type == "pil":
471
+ image = self.numpy_to_pil(image)
472
+
473
+ if not return_dict:
474
+ return (image, has_nsfw_concept)
475
+
476
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
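Because this file ships as a community pipeline, it is normally loaded by name via `custom_pipeline`. A hedged usage sketch (the model id, sampler name and prompt are illustrative, and `custom_pipeline="sd_text2img_k_diffusion"` resolves to the community pipeline on the Hub, which may be a newer revision than this snapshot):

```py
# Illustrative usage sketch; assumes `k-diffusion` is installed and a CUDA device is available.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="sd_text2img_k_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# any sampler exposed by k_diffusion.sampling works here, e.g. sample_heun or sample_euler
pipe.set_scheduler("sample_heun")

image = pipe("an astronaut riding a horse", num_inference_steps=25, guidance_scale=7.5).images[0]
image.save("astronaut.png")
```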
v0.26.3/sde_drag.py ADDED
@@ -0,0 +1,594 @@
1
+ import math
2
+ import tempfile
3
+ from typing import List, Optional
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from accelerate import Accelerator
9
+ from torchvision import transforms
10
+ from tqdm.auto import tqdm
11
+ from transformers import CLIPTextModel, CLIPTokenizer
12
+
13
+ from diffusers import AutoencoderKL, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel
14
+ from diffusers.loaders import AttnProcsLayers, LoraLoaderMixin
15
+ from diffusers.models.attention_processor import (
16
+ AttnAddedKVProcessor,
17
+ AttnAddedKVProcessor2_0,
18
+ LoRAAttnAddedKVProcessor,
19
+ LoRAAttnProcessor,
20
+ LoRAAttnProcessor2_0,
21
+ SlicedAttnAddedKVProcessor,
22
+ )
23
+ from diffusers.optimization import get_scheduler
24
+
25
+
26
+ class SdeDragPipeline(DiffusionPipeline):
27
+ r"""
28
+ Pipeline for image drag-and-drop editing using stochastic differential equations: https://arxiv.org/abs/2311.01410.
29
+ Please refer to the [official repository](https://github.com/ML-GSAI/SDE-Drag) for more information.
30
+
31
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
32
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
33
+
34
+ Args:
35
+ vae ([`AutoencoderKL`]):
36
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
37
+ text_encoder ([`CLIPTextModel`]):
38
+ Frozen text-encoder. Stable Diffusion uses the text portion of
39
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
40
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
41
+ tokenizer (`CLIPTokenizer`):
42
+ Tokenizer of class
43
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
44
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
45
+ scheduler ([`SchedulerMixin`]):
46
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Please use
47
+ [`DDIMScheduler`].
48
+ """
49
+
50
+ def __init__(
51
+ self,
52
+ vae: AutoencoderKL,
53
+ text_encoder: CLIPTextModel,
54
+ tokenizer: CLIPTokenizer,
55
+ unet: UNet2DConditionModel,
56
+ scheduler: DPMSolverMultistepScheduler,
57
+ ):
58
+ super().__init__()
59
+
60
+ self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
61
+
62
+ @torch.no_grad()
63
+ def __call__(
64
+ self,
65
+ prompt: str,
66
+ image: PIL.Image.Image,
67
+ mask_image: PIL.Image.Image,
68
+ source_points: List[List[int]],
69
+ target_points: List[List[int]],
70
+ t0: Optional[float] = 0.6,
71
+ steps: Optional[int] = 200,
72
+ step_size: Optional[int] = 2,
73
+ image_scale: Optional[float] = 0.3,
74
+ adapt_radius: Optional[int] = 5,
75
+ min_lora_scale: Optional[float] = 0.5,
76
+ generator: Optional[torch.Generator] = None,
77
+ ):
78
+ r"""
79
+ Function invoked when calling the pipeline for image editing.
80
+ Args:
81
+ prompt (`str`, *required*):
82
+ The prompt to guide the image editing.
83
+ image (`PIL.Image.Image`, *required*):
84
+ The image to be edited; parts of it will be masked out with `mask_image` and edited
85
+ according to `prompt`.
86
+ mask_image (`PIL.Image.Image`, *required*):
87
+ To mask `image`. White pixels in the mask will be edited, while black pixels will be preserved.
88
+ source_points (`List[List[int]]`, *required*):
89
+ Used to mark the starting positions of drag editing in the image, with each pixel represented as a
90
+ `List[int]` of length 2.
91
+ target_points (`List[List[int]]`, *required*):
92
+ Used to mark the target positions of drag editing in the image, with each pixel represented as a
93
+ `List[int]` of length 2.
94
+ t0 (`float`, *optional*, defaults to 0.6):
95
+ The time parameter. Higher t0 improves the fidelity while lowering the faithfulness of the edited images
96
+ and vice versa.
97
+ steps (`int`, *optional*, defaults to 200):
98
+ The number of sampling iterations.
99
+ step_size (`int`, *optional*, defaults to 2):
100
+ The drag distance of each drag step.
101
+ image_scale (`float`, *optional*, defaults to 0.3):
102
+ To avoid duplicating the content, use `image_scale` to perturb the source.
103
+ adapt_radius (`int`, *optional*, defaults to 5):
104
+ The size of the region for copy and paste operations during each step of the drag process.
105
+ min_lora_scale (`float`, *optional*, defaults to 0.5):
106
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
107
+ min_lora_scale specifies the minimum LoRA scale during the image drag-editing process.
108
+ generator (`torch.Generator`, *optional*, defaults to `None`):
109
+ To make generation deterministic (https://pytorch.org/docs/stable/generated/torch.Generator.html).
110
+ Examples:
111
+ ```py
112
+ >>> import PIL
113
+ >>> import torch
114
+ >>> from diffusers import DDIMScheduler, DiffusionPipeline
115
+
116
+ >>> # Load the pipeline
117
+ >>> model_path = "runwayml/stable-diffusion-v1-5"
118
+ >>> scheduler = DDIMScheduler.from_pretrained(model_path, subfolder="scheduler")
119
+ >>> pipe = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, custom_pipeline="sde_drag")
120
+ >>> pipe.to('cuda')
121
+
122
+ >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
123
+ >>> # If not training LoRA, please avoid using torch.float16
124
+ >>> # pipe.to(torch.float16)
125
+
126
+ >>> # Provide prompt, image, mask image, and the starting and target points for drag editing.
127
+ >>> prompt = "prompt of the image"
128
+ >>> image = PIL.Image.open('/path/to/image')
129
+ >>> mask_image = PIL.Image.open('/path/to/mask_image')
130
+ >>> source_points = [[123, 456]]
131
+ >>> target_points = [[234, 567]]
132
+
133
+ >>> # train_lora is optional, and in most cases, using train_lora can better preserve consistency with the original image.
134
+ >>> pipe.train_lora(prompt, image)
135
+
136
+ >>> output = pipe(prompt, image, mask_image, source_points, target_points)
137
+ >>> output_image = PIL.Image.fromarray(output)
138
+ >>> output_image.save("./output.png")
139
+ ```
140
+ """
141
+
142
+ self.scheduler.set_timesteps(steps)
143
+
144
+ noise_scale = (1 - image_scale**2) ** (0.5)
145
+
146
+ text_embeddings = self._get_text_embed(prompt)
147
+ uncond_embeddings = self._get_text_embed([""])
148
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
149
+
150
+ latent = self._get_img_latent(image)
151
+
152
+ mask = mask_image.resize((latent.shape[3], latent.shape[2]))
153
+ mask = torch.tensor(np.array(mask))
154
+ mask = mask.unsqueeze(0).expand_as(latent).to(self.device)
155
+
156
+ source_points = torch.tensor(source_points).div(torch.tensor([8]), rounding_mode="trunc")
157
+ target_points = torch.tensor(target_points).div(torch.tensor([8]), rounding_mode="trunc")
158
+
159
+ distance = target_points - source_points
160
+ distance_norm_max = torch.norm(distance.float(), dim=1, keepdim=True).max()
161
+
162
+ if distance_norm_max <= step_size:
163
+ drag_num = 1
164
+ else:
165
+ drag_num = distance_norm_max.div(torch.tensor([step_size]), rounding_mode="trunc")
166
+ if (distance_norm_max / drag_num - step_size).abs() > (
167
+ distance_norm_max / (drag_num + 1) - step_size
168
+ ).abs():
169
+ drag_num += 1
170
+
171
+ latents = []
172
+ for i in tqdm(range(int(drag_num)), desc="SDE Drag"):
173
+ source_new = source_points + (i / drag_num * distance).to(torch.int)
174
+ target_new = source_points + ((i + 1) / drag_num * distance).to(torch.int)
175
+
176
+ latent, noises, hook_latents, lora_scales, cfg_scales = self._forward(
177
+ latent, steps, t0, min_lora_scale, text_embeddings, generator
178
+ )
179
+ latent = self._copy_and_paste(
180
+ latent,
181
+ source_new,
182
+ target_new,
183
+ adapt_radius,
184
+ latent.shape[2] - 1,
185
+ latent.shape[3] - 1,
186
+ image_scale,
187
+ noise_scale,
188
+ generator,
189
+ )
190
+ latent = self._backward(
191
+ latent, mask, steps, t0, noises, hook_latents, lora_scales, cfg_scales, text_embeddings, generator
192
+ )
193
+
194
+ latents.append(latent)
195
+
196
+ result_image = 1 / 0.18215 * latents[-1]
197
+
198
+ with torch.no_grad():
199
+ result_image = self.vae.decode(result_image).sample
200
+
201
+ result_image = (result_image / 2 + 0.5).clamp(0, 1)
202
+ result_image = result_image.cpu().permute(0, 2, 3, 1).numpy()[0]
203
+ result_image = (result_image * 255).astype(np.uint8)
204
+
205
+ return result_image
206
+
207
+ def train_lora(self, prompt, image, lora_step=100, lora_rank=16, generator=None):
208
+ accelerator = Accelerator(gradient_accumulation_steps=1, mixed_precision="fp16")
209
+
210
+ self.vae.requires_grad_(False)
211
+ self.text_encoder.requires_grad_(False)
212
+ self.unet.requires_grad_(False)
213
+
214
+ unet_lora_attn_procs = {}
215
+ for name, attn_processor in self.unet.attn_processors.items():
216
+ cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim
217
+ if name.startswith("mid_block"):
218
+ hidden_size = self.unet.config.block_out_channels[-1]
219
+ elif name.startswith("up_blocks"):
220
+ block_id = int(name[len("up_blocks.")])
221
+ hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
222
+ elif name.startswith("down_blocks"):
223
+ block_id = int(name[len("down_blocks.")])
224
+ hidden_size = self.unet.config.block_out_channels[block_id]
225
+ else:
226
+ raise NotImplementedError("name must start with up_blocks, mid_block, or down_blocks")
227
+
228
+ if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)):
229
+ lora_attn_processor_class = LoRAAttnAddedKVProcessor
230
+ else:
231
+ lora_attn_processor_class = (
232
+ LoRAAttnProcessor2_0
233
+ if hasattr(torch.nn.functional, "scaled_dot_product_attention")
234
+ else LoRAAttnProcessor
235
+ )
236
+ unet_lora_attn_procs[name] = lora_attn_processor_class(
237
+ hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank
238
+ )
239
+
240
+ self.unet.set_attn_processor(unet_lora_attn_procs)
241
+ unet_lora_layers = AttnProcsLayers(self.unet.attn_processors)
242
+ params_to_optimize = unet_lora_layers.parameters()
243
+
244
+ optimizer = torch.optim.AdamW(
245
+ params_to_optimize,
246
+ lr=2e-4,
247
+ betas=(0.9, 0.999),
248
+ weight_decay=1e-2,
249
+ eps=1e-08,
250
+ )
251
+
252
+ lr_scheduler = get_scheduler(
253
+ "constant",
254
+ optimizer=optimizer,
255
+ num_warmup_steps=0,
256
+ num_training_steps=lora_step,
257
+ num_cycles=1,
258
+ power=1.0,
259
+ )
260
+
261
+ unet_lora_layers = accelerator.prepare_model(unet_lora_layers)
262
+ optimizer = accelerator.prepare_optimizer(optimizer)
263
+ lr_scheduler = accelerator.prepare_scheduler(lr_scheduler)
264
+
265
+ with torch.no_grad():
266
+ text_inputs = self._tokenize_prompt(prompt, tokenizer_max_length=None)
267
+ text_embedding = self._encode_prompt(
268
+ text_inputs.input_ids, text_inputs.attention_mask, text_encoder_use_attention_mask=False
269
+ )
270
+
271
+ image_transforms = transforms.Compose(
272
+ [
273
+ transforms.ToTensor(),
274
+ transforms.Normalize([0.5], [0.5]),
275
+ ]
276
+ )
277
+
278
+ image = image_transforms(image).to(self.device, dtype=self.vae.dtype)
279
+ image = image.unsqueeze(dim=0)
280
+ latents_dist = self.vae.encode(image).latent_dist
281
+
282
+ for _ in tqdm(range(lora_step), desc="Train LoRA"):
283
+ self.unet.train()
284
+ model_input = latents_dist.sample() * self.vae.config.scaling_factor
285
+
286
+ # Sample noise that we'll add to the latents
287
+ noise = torch.randn(
288
+ model_input.size(),
289
+ dtype=model_input.dtype,
290
+ layout=model_input.layout,
291
+ device=model_input.device,
292
+ generator=generator,
293
+ )
294
+ bsz, channels, height, width = model_input.shape
295
+
296
+ # Sample a random timestep for each image
297
+ timesteps = torch.randint(
298
+ 0, self.scheduler.config.num_train_timesteps, (bsz,), device=model_input.device, generator=generator
299
+ )
300
+ timesteps = timesteps.long()
301
+
302
+ # Add noise to the model input according to the noise magnitude at each timestep
303
+ # (this is the forward diffusion process)
304
+ noisy_model_input = self.scheduler.add_noise(model_input, noise, timesteps)
305
+
306
+ # Predict the noise residual
307
+ model_pred = self.unet(noisy_model_input, timesteps, text_embedding).sample
308
+
309
+ # Get the target for loss depending on the prediction type
310
+ if self.scheduler.config.prediction_type == "epsilon":
311
+ target = noise
312
+ elif self.scheduler.config.prediction_type == "v_prediction":
313
+ target = self.scheduler.get_velocity(model_input, noise, timesteps)
314
+ else:
315
+ raise ValueError(f"Unknown prediction type {self.scheduler.config.prediction_type}")
316
+
317
+ loss = torch.nn.functional.mse_loss(model_pred.float(), target.float(), reduction="mean")
318
+ accelerator.backward(loss)
319
+ optimizer.step()
320
+ lr_scheduler.step()
321
+ optimizer.zero_grad()
322
+
323
+ with tempfile.TemporaryDirectory() as save_lora_dir:
324
+ LoraLoaderMixin.save_lora_weights(
325
+ save_directory=save_lora_dir,
326
+ unet_lora_layers=unet_lora_layers,
327
+ text_encoder_lora_layers=None,
328
+ )
329
+
330
+ self.unet.load_attn_procs(save_lora_dir)
331
+
332
+ def _tokenize_prompt(self, prompt, tokenizer_max_length=None):
333
+ if tokenizer_max_length is not None:
334
+ max_length = tokenizer_max_length
335
+ else:
336
+ max_length = self.tokenizer.model_max_length
337
+
338
+ text_inputs = self.tokenizer(
339
+ prompt,
340
+ truncation=True,
341
+ padding="max_length",
342
+ max_length=max_length,
343
+ return_tensors="pt",
344
+ )
345
+
346
+ return text_inputs
347
+
348
+ def _encode_prompt(self, input_ids, attention_mask, text_encoder_use_attention_mask=False):
349
+ text_input_ids = input_ids.to(self.device)
350
+
351
+ if text_encoder_use_attention_mask:
352
+ attention_mask = attention_mask.to(self.device)
353
+ else:
354
+ attention_mask = None
355
+
356
+ prompt_embeds = self.text_encoder(
357
+ text_input_ids,
358
+ attention_mask=attention_mask,
359
+ )
360
+ prompt_embeds = prompt_embeds[0]
361
+
362
+ return prompt_embeds
363
+
364
+ @torch.no_grad()
365
+ def _get_text_embed(self, prompt):
366
+ text_input = self.tokenizer(
367
+ prompt,
368
+ padding="max_length",
369
+ max_length=self.tokenizer.model_max_length,
370
+ truncation=True,
371
+ return_tensors="pt",
372
+ )
373
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
374
+ return text_embeddings
375
+
376
+ def _copy_and_paste(
377
+ self, latent, source_new, target_new, adapt_radius, max_height, max_width, image_scale, noise_scale, generator
378
+ ):
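+ # For every drag point: clamp the patch radius at the latent borders (adaption_r), replace the
+ # source patch with an image_scale/noise_scale mix of itself and fresh noise, and paste the
+ # original (slightly amplified) source features onto the target patch.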
379
+ def adaption_r(source, target, adapt_radius, max_height, max_width):
380
+ r_x_lower = min(adapt_radius, source[0], target[0])
381
+ r_x_upper = min(adapt_radius, max_width - source[0], max_width - target[0])
382
+ r_y_lower = min(adapt_radius, source[1], target[1])
383
+ r_y_upper = min(adapt_radius, max_height - source[1], max_height - target[1])
384
+ return r_x_lower, r_x_upper, r_y_lower, r_y_upper
385
+
386
+ for source_, target_ in zip(source_new, target_new):
387
+ r_x_lower, r_x_upper, r_y_lower, r_y_upper = adaption_r(
388
+ source_, target_, adapt_radius, max_height, max_width
389
+ )
390
+
391
+ source_feature = latent[
392
+ :, :, source_[1] - r_y_lower : source_[1] + r_y_upper, source_[0] - r_x_lower : source_[0] + r_x_upper
393
+ ].clone()
394
+
395
+ latent[
396
+ :, :, source_[1] - r_y_lower : source_[1] + r_y_upper, source_[0] - r_x_lower : source_[0] + r_x_upper
397
+ ] = image_scale * source_feature + noise_scale * torch.randn(
398
+ latent.shape[0],
399
+ 4,
400
+ r_y_lower + r_y_upper,
401
+ r_x_lower + r_x_upper,
402
+ device=self.device,
403
+ generator=generator,
404
+ )
405
+
406
+ latent[
407
+ :, :, target_[1] - r_y_lower : target_[1] + r_y_upper, target_[0] - r_x_lower : target_[0] + r_x_upper
408
+ ] = source_feature * 1.1
409
+ return latent
410
+
411
+ @torch.no_grad()
412
+ def _get_img_latent(self, image, height=None, width=None):
413
+ data = image.convert("RGB")
414
+ if height is not None:
415
+ data = data.resize((width, height))
416
+ transform = transforms.ToTensor()
417
+ data = transform(data).unsqueeze(0)
418
+ data = (data * 2.0) - 1.0
419
+ data = data.to(self.device, dtype=self.vae.dtype)
420
+ latent = self.vae.encode(data).latent_dist.sample()
421
+ latent = 0.18215 * latent
422
+ return latent
423
+
424
+ @torch.no_grad()
425
+ def _get_eps(self, latent, timestep, guidance_scale, text_embeddings, lora_scale=None):
426
+ latent_model_input = torch.cat([latent] * 2) if guidance_scale > 1.0 else latent
427
+ text_embeddings = text_embeddings if guidance_scale > 1.0 else text_embeddings.chunk(2)[1]
428
+
429
+ cross_attention_kwargs = None if lora_scale is None else {"scale": lora_scale}
430
+
431
+ with torch.no_grad():
432
+ noise_pred = self.unet(
433
+ latent_model_input,
434
+ timestep,
435
+ encoder_hidden_states=text_embeddings,
436
+ cross_attention_kwargs=cross_attention_kwargs,
437
+ ).sample
438
+
439
+ if guidance_scale > 1.0:
440
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
441
+ elif guidance_scale == 1.0:
442
+ noise_pred_text = noise_pred
443
+ noise_pred_uncond = 0.0
444
+ else:
445
+ raise NotImplementedError(guidance_scale)
446
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
447
+
448
+ return noise_pred
449
+
450
+ def _forward_sde(
451
+ self, timestep, sample, guidance_scale, text_embeddings, steps, eta=1.0, lora_scale=None, generator=None
452
+ ):
453
+ num_train_timesteps = len(self.scheduler)
454
+ alphas_cumprod = self.scheduler.alphas_cumprod
455
+ initial_alpha_cumprod = torch.tensor(1.0)
456
+
457
+ prev_timestep = timestep + num_train_timesteps // steps
458
+
459
+ alpha_prod_t = alphas_cumprod[timestep] if timestep >= 0 else initial_alpha_cumprod
460
+ alpha_prod_t_prev = alphas_cumprod[prev_timestep]
461
+
462
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
463
+
464
+ x_prev = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) * sample + (1 - alpha_prod_t_prev / alpha_prod_t) ** (
465
+ 0.5
466
+ ) * torch.randn(
467
+ sample.size(), dtype=sample.dtype, layout=sample.layout, device=self.device, generator=generator
468
+ )
469
+ eps = self._get_eps(x_prev, prev_timestep, guidance_scale, text_embeddings, lora_scale)
470
+
471
+ sigma_t_prev = (
472
+ eta
473
+ * (1 - alpha_prod_t) ** (0.5)
474
+ * (1 - alpha_prod_t_prev / (1 - alpha_prod_t_prev) * (1 - alpha_prod_t) / alpha_prod_t) ** (0.5)
475
+ )
476
+
477
+ pred_original_sample = (x_prev - beta_prod_t_prev ** (0.5) * eps) / alpha_prod_t_prev ** (0.5)
478
+ pred_sample_direction_coeff = (1 - alpha_prod_t - sigma_t_prev**2) ** (0.5)
479
+
480
+ noise = (
481
+ sample - alpha_prod_t ** (0.5) * pred_original_sample - pred_sample_direction_coeff * eps
482
+ ) / sigma_t_prev
483
+
484
+ return x_prev, noise
485
+
486
+ def _sample(
487
+ self,
488
+ timestep,
489
+ sample,
490
+ guidance_scale,
491
+ text_embeddings,
492
+ steps,
493
+ sde=False,
494
+ noise=None,
495
+ eta=1.0,
496
+ lora_scale=None,
497
+ generator=None,
498
+ ):
499
+ num_train_timesteps = len(self.scheduler)
500
+ alphas_cumprod = self.scheduler.alphas_cumprod
501
+ final_alpha_cumprod = torch.tensor(1.0)
502
+
503
+ eps = self._get_eps(sample, timestep, guidance_scale, text_embeddings, lora_scale)
504
+
505
+ prev_timestep = timestep - num_train_timesteps // steps
506
+
507
+ alpha_prod_t = alphas_cumprod[timestep]
508
+ alpha_prod_t_prev = alphas_cumprod[prev_timestep] if prev_timestep >= 0 else final_alpha_cumprod
509
+
510
+ beta_prod_t = 1 - alpha_prod_t
511
+
512
+ sigma_t = (
513
+ eta
514
+ * ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) ** (0.5)
515
+ * (1 - alpha_prod_t / alpha_prod_t_prev) ** (0.5)
516
+ if sde
517
+ else 0
518
+ )
519
+
520
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * eps) / alpha_prod_t ** (0.5)
521
+ pred_sample_direction_coeff = (1 - alpha_prod_t_prev - sigma_t**2) ** (0.5)
522
+
523
+ noise = (
524
+ torch.randn(
525
+ sample.size(), dtype=sample.dtype, layout=sample.layout, device=self.device, generator=generator
526
+ )
527
+ if noise is None
528
+ else noise
529
+ )
530
+ latent = (
531
+ alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction_coeff * eps + sigma_t * noise
532
+ )
533
+
534
+ return latent
535
+
536
+ def _forward(self, latent, steps, t0, lora_scale_min, text_embeddings, generator):
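+ # Noising pass: walk the timesteps from the t0 level up toward T with the forward SDE, caching
+ # the exact noises, intermediate latents and the LoRA/CFG schedules so _backward can replay them.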
537
+ def scale_schedule(begin, end, n, length, type="linear"):
538
+ if type == "constant":
539
+ return end
540
+ elif type == "linear":
541
+ return begin + (end - begin) * n / length
542
+ elif type == "cos":
543
+ factor = (1 - math.cos(n * math.pi / length)) / 2
544
+ return (1 - factor) * begin + factor * end
545
+ else:
546
+ raise NotImplementedError(type)
547
+
548
+ noises = []
549
+ latents = []
550
+ lora_scales = []
551
+ cfg_scales = []
552
+ latents.append(latent)
553
+ t0 = int(t0 * steps)
554
+ t_begin = steps - t0
555
+
556
+ length = len(self.scheduler.timesteps[t_begin - 1 : -1]) - 1
557
+ index = 1
558
+ for t in self.scheduler.timesteps[t_begin:].flip(dims=[0]):
559
+ lora_scale = scale_schedule(1, lora_scale_min, index, length, type="cos")
560
+ cfg_scale = scale_schedule(1, 3.0, index, length, type="linear")
561
+ latent, noise = self._forward_sde(
562
+ t, latent, cfg_scale, text_embeddings, steps, lora_scale=lora_scale, generator=generator
563
+ )
564
+
565
+ noises.append(noise)
566
+ latents.append(latent)
567
+ lora_scales.append(lora_scale)
568
+ cfg_scales.append(cfg_scale)
569
+ index += 1
570
+ return latent, noises, latents, lora_scales, cfg_scales
571
+
572
+ def _backward(
573
+ self, latent, mask, steps, t0, noises, hook_latents, lora_scales, cfg_scales, text_embeddings, generator
574
+ ):
575
+ t0 = int(t0 * steps)
576
+ t_begin = steps - t0
577
+
578
+ hook_latent = hook_latents.pop()
579
+ latent = torch.where(mask > 128, latent, hook_latent)
580
+ for t in self.scheduler.timesteps[t_begin - 1 : -1]:
581
+ latent = self._sample(
582
+ t,
583
+ latent,
584
+ cfg_scales.pop(),
585
+ text_embeddings,
586
+ steps,
587
+ sde=True,
588
+ noise=noises.pop(),
589
+ lora_scale=lora_scales.pop(),
590
+ generator=generator,
591
+ )
592
+ hook_latent = hook_latents.pop()
593
+ latent = torch.where(mask > 128, latent, hook_latent)
594
+ return latent
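For reference, the drag itself is split into `drag_num` sub-steps whose per-step displacement is as close as possible to `step_size` (measured in latent pixels, i.e. after dividing the point coordinates by 8). A standalone sketch of that scheduling arithmetic with made-up coordinates:

```py
# Standalone sketch (illustrative numbers) of how SdeDragPipeline.__call__ splits a drag into sub-steps.
import torch

source_points = torch.tensor([[123, 456]]) // 8  # latent-space coordinates
target_points = torch.tensor([[234, 567]]) // 8
step_size = 2

distance = target_points - source_points
distance_norm_max = torch.norm(distance.float(), dim=1, keepdim=True).max()

if distance_norm_max <= step_size:
    drag_num = torch.tensor(1)
else:
    drag_num = distance_norm_max.div(torch.tensor(step_size), rounding_mode="trunc")
    # choose the sub-step count whose per-step distance is closest to step_size
    if (distance_norm_max / drag_num - step_size).abs() > (distance_norm_max / (drag_num + 1) - step_size).abs():
        drag_num += 1

for i in range(int(drag_num)):
    source_new = source_points + (i / drag_num * distance).to(torch.int)
    target_new = source_points + ((i + 1) / drag_num * distance).to(torch.int)
    # each (source_new, target_new) pair drives one noise -> copy-and-paste -> denoise round
```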