multimodalart (HF staff) committed on
Commit
81232f8
1 Parent(s): 5f1eb4c

Create pipeline_stable_diffusion_3_ipa.py

Files changed (1)
  1. pipeline_stable_diffusion_3_ipa.py +1235 -0
pipeline_stable_diffusion_3_ipa.py ADDED
@@ -0,0 +1,1235 @@
1
+ # Copyright 2024 Stability AI, The HuggingFace Team and The InstantX Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+ import torch.nn.functional as F
21
+ from transformers import (
22
+ CLIPTextModelWithProjection,
23
+ CLIPTokenizer,
24
+ T5EncoderModel,
25
+ T5TokenizerFast,
26
+ )
27
+
28
+ from diffusers.image_processor import VaeImageProcessor
29
+ from diffusers.loaders import FromSingleFileMixin, SD3LoraLoaderMixin
30
+ from diffusers.models.autoencoders import AutoencoderKL
31
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
32
+ from diffusers.utils import (
33
+ USE_PEFT_BACKEND,
34
+ is_torch_xla_available,
35
+ logging,
36
+ replace_example_docstring,
37
+ scale_lora_layers,
38
+ unscale_lora_layers,
39
+ )
40
+ from diffusers.utils.torch_utils import randn_tensor
41
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
42
+ from diffusers.pipelines.stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput
43
+
44
+ from models.resampler import TimeResampler
45
+ from models.transformer_sd3 import SD3Transformer2DModel
46
+ from diffusers.models.normalization import RMSNorm
47
+ from einops import rearrange
48
+
49
+
50
+ if is_torch_xla_available():
51
+ import torch_xla.core.xla_model as xm
52
+
53
+ XLA_AVAILABLE = True
54
+ else:
55
+ XLA_AVAILABLE = False
56
+
57
+
58
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
59
+
60
+ EXAMPLE_DOC_STRING = """
61
+ Examples:
62
+ ```py
63
+ >>> import torch
64
+ >>> from diffusers import StableDiffusion3Pipeline
65
+
66
+ >>> pipe = StableDiffusion3Pipeline.from_pretrained(
67
+ ... "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
68
+ ... )
69
+ >>> pipe.to("cuda")
70
+ >>> prompt = "A cat holding a sign that says hello world"
71
+ >>> image = pipe(prompt).images[0]
72
+ >>> image.save("sd3.png")
73
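+ 
+ >>> # IP-Adapter usage with this pipeline (sketch): `init_ipadapter` and the
+ >>> # `clip_image` / `ipadapter_scale` arguments are defined in this file; the paths
+ >>> # below are placeholders for a local SD3 IP-Adapter checkpoint and SigLIP encoder.
+ >>> # pipe.init_ipadapter(
+ >>> #     ip_adapter_path="ip-adapter.bin",
+ >>> #     image_encoder_path="path/to/siglip-image-encoder",
+ >>> #     nb_token=64,
+ >>> # )
+ >>> # image = pipe(prompt, clip_image=reference_image, ipadapter_scale=0.7).images[0]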
+ ```
74
+ """
75
+
76
+
77
+ class AdaLayerNorm(nn.Module):
78
+ """
79
+ Adaptive layer norm conditioned on a timestep embedding; supports a `'normal'` (shift/scale) mode and a `'zero'` (adaLN-Zero) mode.
80
+
81
+ Parameters:
82
+ embedding_dim (`int`): The size of each embedding vector.
83
+ time_embedding_dim (`int`, *optional*): Size of the conditioning embedding, defaults to `embedding_dim`. mode (`str`): Either `'normal'` or `'zero'`.
84
+ """
85
+
86
+ def __init__(self, embedding_dim: int, time_embedding_dim=None, mode='normal'):
87
+ super().__init__()
88
+
89
+ self.silu = nn.SiLU()
90
+ num_params_dict = dict(
91
+ zero=6,
92
+ normal=2,
93
+ )
94
+ num_params = num_params_dict[mode]
95
+ self.linear = nn.Linear(time_embedding_dim or embedding_dim, num_params * embedding_dim, bias=True)
96
+ self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
97
+ self.mode = mode
98
+
99
+ def forward(
100
+ self,
101
+ x,
102
+ hidden_dtype = None,
103
+ emb = None,
104
+ ):
105
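+ # project the conditioning embedding into shift/scale parameters (plus gates in 'zero' mode)
+ # and modulate the normalized input with them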
+ emb = self.linear(self.silu(emb))
106
+ if self.mode == 'normal':
107
+ shift_msa, scale_msa = emb.chunk(2, dim=1)
108
+ x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
109
+ return x
110
+
111
+ elif self.mode == 'zero':
112
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
113
+ x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
114
+ return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
115
+
116
+
117
+ class JointIPAttnProcessor(torch.nn.Module):
118
+ """Attention processor used typically in processing the SD3-like self-attention projections."""
119
+
120
+ def __init__(
121
+ self,
122
+ hidden_size=None,
123
+ cross_attention_dim=None,
124
+ ip_hidden_states_dim=None,
125
+ ip_encoder_hidden_states_dim=None,
126
+ head_dim=None,
127
+ timesteps_emb_dim=1280,
128
+ ):
129
+ super().__init__()
130
+
131
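+ # IP-Adapter branch: timestep-conditioned AdaLayerNorm over the image-prompt tokens,
+ # dedicated key/value projections for those tokens, and RMSNorm applied per attention head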
+ self.norm_ip = AdaLayerNorm(ip_hidden_states_dim, time_embedding_dim=timesteps_emb_dim)
132
+ self.to_k_ip = nn.Linear(ip_hidden_states_dim, hidden_size, bias=False)
133
+ self.to_v_ip = nn.Linear(ip_hidden_states_dim, hidden_size, bias=False)
134
+ self.norm_q = RMSNorm(head_dim, 1e-6)
135
+ self.norm_k = RMSNorm(head_dim, 1e-6)
136
+ self.norm_ip_k = RMSNorm(head_dim, 1e-6)
137
+
138
+
139
+ def __call__(
140
+ self,
141
+ attn,
142
+ hidden_states: torch.FloatTensor,
143
+ encoder_hidden_states: torch.FloatTensor = None,
144
+ attention_mask: Optional[torch.FloatTensor] = None,
145
+ emb_dict=None,
146
+ *args,
147
+ **kwargs,
148
+ ) -> torch.FloatTensor:
149
+ residual = hidden_states
150
+
151
+ batch_size = hidden_states.shape[0]
152
+
153
+ # `sample` projections.
154
+ query = attn.to_q(hidden_states)
155
+ key = attn.to_k(hidden_states)
156
+ value = attn.to_v(hidden_states)
157
+ img_query = query
158
+ img_key = key
159
+ img_value = value
160
+
161
+ inner_dim = key.shape[-1]
162
+ head_dim = inner_dim // attn.heads
163
+
164
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
165
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
166
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
167
+
168
+ if attn.norm_q is not None:
169
+ query = attn.norm_q(query)
170
+ if attn.norm_k is not None:
171
+ key = attn.norm_k(key)
172
+
173
+ # `context` projections.
174
+ if encoder_hidden_states is not None:
175
+ encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states)
176
+ encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states)
177
+ encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states)
178
+
179
+ encoder_hidden_states_query_proj = encoder_hidden_states_query_proj.view(
180
+ batch_size, -1, attn.heads, head_dim
181
+ ).transpose(1, 2)
182
+ encoder_hidden_states_key_proj = encoder_hidden_states_key_proj.view(
183
+ batch_size, -1, attn.heads, head_dim
184
+ ).transpose(1, 2)
185
+ encoder_hidden_states_value_proj = encoder_hidden_states_value_proj.view(
186
+ batch_size, -1, attn.heads, head_dim
187
+ ).transpose(1, 2)
188
+
189
+ if attn.norm_added_q is not None:
190
+ encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj)
191
+ if attn.norm_added_k is not None:
192
+ encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj)
193
+
194
+ query = torch.cat([query, encoder_hidden_states_query_proj], dim=2)
195
+ key = torch.cat([key, encoder_hidden_states_key_proj], dim=2)
196
+ value = torch.cat([value, encoder_hidden_states_value_proj], dim=2)
197
+
198
+ hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
199
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
200
+ hidden_states = hidden_states.to(query.dtype)
201
+
202
+ if encoder_hidden_states is not None:
203
+ # Split the attention outputs.
204
+ hidden_states, encoder_hidden_states = (
205
+ hidden_states[:, : residual.shape[1]],
206
+ hidden_states[:, residual.shape[1] :],
207
+ )
208
+ if not attn.context_pre_only:
209
+ encoder_hidden_states = attn.to_add_out(encoder_hidden_states)
210
+
211
+
212
+ # IPadapter
213
+ ip_hidden_states = emb_dict.get('ip_hidden_states', None)
214
+ ip_hidden_states = self.get_ip_hidden_states(
215
+ attn,
216
+ img_query,
217
+ ip_hidden_states,
218
+ img_key,
219
+ img_value,
220
+ None,
221
+ None,
222
+ emb_dict['temb'],
223
+ )
224
+ if ip_hidden_states is not None:
225
+ hidden_states = hidden_states + ip_hidden_states * emb_dict.get('scale', 1.0)
226
+
227
+
228
+ # linear proj
229
+ hidden_states = attn.to_out[0](hidden_states)
230
+ # dropout
231
+ hidden_states = attn.to_out[1](hidden_states)
232
+
233
+ if encoder_hidden_states is not None:
234
+ return hidden_states, encoder_hidden_states
235
+ else:
236
+ return hidden_states
237
+
238
+
239
+ def get_ip_hidden_states(self, attn, query, ip_hidden_states, img_key=None, img_value=None, text_key=None, text_value=None, temb=None):
240
+ if ip_hidden_states is None:
241
+ return None
242
+
243
+ if not hasattr(self, 'to_k_ip') or not hasattr(self, 'to_v_ip'):
244
+ return None
245
+
246
+ # norm ip input
247
+ norm_ip_hidden_states = self.norm_ip(ip_hidden_states, emb=temb)
248
+
249
+ # to k and v
250
+ ip_key = self.to_k_ip(norm_ip_hidden_states)
251
+ ip_value = self.to_v_ip(norm_ip_hidden_states)
252
+
253
+ # reshape
254
+ query = rearrange(query, 'b l (h d) -> b h l d', h=attn.heads)
255
+ img_key = rearrange(img_key, 'b l (h d) -> b h l d', h=attn.heads)
256
+ img_value = rearrange(img_value, 'b l (h d) -> b h l d', h=attn.heads)
257
+ ip_key = rearrange(ip_key, 'b l (h d) -> b h l d', h=attn.heads)
258
+ ip_value = rearrange(ip_value, 'b l (h d) -> b h l d', h=attn.heads)
259
+
260
+ # norm
261
+ query = self.norm_q(query)
262
+ img_key = self.norm_k(img_key)
263
+ ip_key = self.norm_ip_k(ip_key)
264
+
265
+ # cat img
266
+ key = torch.cat([img_key, ip_key], dim=2)
267
+ value = torch.cat([img_value, ip_value], dim=2)
268
+
269
+ # joint attention: the image query attends over the concatenated image and IP-Adapter tokens
270
+ ip_hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
271
+ ip_hidden_states = rearrange(ip_hidden_states, 'b h l d -> b l (h d)')
272
+ ip_hidden_states = ip_hidden_states.to(query.dtype)
273
+ return ip_hidden_states
274
+
275
+
276
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
277
+ def retrieve_timesteps(
278
+ scheduler,
279
+ num_inference_steps: Optional[int] = None,
280
+ device: Optional[Union[str, torch.device]] = None,
281
+ timesteps: Optional[List[int]] = None,
282
+ sigmas: Optional[List[float]] = None,
283
+ **kwargs,
284
+ ):
285
+ """
286
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
287
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
288
+
289
+ Args:
290
+ scheduler (`SchedulerMixin`):
291
+ The scheduler to get timesteps from.
292
+ num_inference_steps (`int`):
293
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
294
+ must be `None`.
295
+ device (`str` or `torch.device`, *optional*):
296
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
297
+ timesteps (`List[int]`, *optional*):
298
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
299
+ `num_inference_steps` and `sigmas` must be `None`.
300
+ sigmas (`List[float]`, *optional*):
301
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
302
+ `num_inference_steps` and `timesteps` must be `None`.
303
+
304
+ Returns:
305
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
306
+ second element is the number of inference steps.
307
+ """
308
+ if timesteps is not None and sigmas is not None:
309
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
310
+ if timesteps is not None:
311
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
312
+ if not accepts_timesteps:
313
+ raise ValueError(
314
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
315
+ f" timestep schedules. Please check whether you are using the correct scheduler."
316
+ )
317
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
318
+ timesteps = scheduler.timesteps
319
+ num_inference_steps = len(timesteps)
320
+ elif sigmas is not None:
321
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
322
+ if not accept_sigmas:
323
+ raise ValueError(
324
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
325
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
326
+ )
327
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
328
+ timesteps = scheduler.timesteps
329
+ num_inference_steps = len(timesteps)
330
+ else:
331
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
332
+ timesteps = scheduler.timesteps
333
+ return timesteps, num_inference_steps
334
+
335
+
336
+ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingleFileMixin):
337
+ r"""
338
+ Args:
339
+ transformer ([`SD3Transformer2DModel`]):
340
+ Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
341
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
342
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
343
+ vae ([`AutoencoderKL`]):
344
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
345
+ text_encoder ([`CLIPTextModelWithProjection`]):
346
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
347
+ specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant,
348
+ with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size`
349
+ as its dimension.
350
+ text_encoder_2 ([`CLIPTextModelWithProjection`]):
351
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
352
+ specifically the
353
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
354
+ variant.
355
+ text_encoder_3 ([`T5EncoderModel`]):
356
+ Frozen text-encoder. Stable Diffusion 3 uses
357
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
358
+ [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
359
+ tokenizer (`CLIPTokenizer`):
360
+ Tokenizer of class
361
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
362
+ tokenizer_2 (`CLIPTokenizer`):
363
+ Second Tokenizer of class
364
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
365
+ tokenizer_3 (`T5TokenizerFast`):
366
+ Tokenizer of class
367
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
368
+ """
369
+
370
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae"
371
+ _optional_components = []
372
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"]
373
+
374
+ def __init__(
375
+ self,
376
+ transformer: SD3Transformer2DModel,
377
+ scheduler: FlowMatchEulerDiscreteScheduler,
378
+ vae: AutoencoderKL,
379
+ text_encoder: CLIPTextModelWithProjection,
380
+ tokenizer: CLIPTokenizer,
381
+ text_encoder_2: CLIPTextModelWithProjection,
382
+ tokenizer_2: CLIPTokenizer,
383
+ text_encoder_3: T5EncoderModel,
384
+ tokenizer_3: T5TokenizerFast,
385
+ ):
386
+ super().__init__()
387
+
388
+ self.register_modules(
389
+ vae=vae,
390
+ text_encoder=text_encoder,
391
+ text_encoder_2=text_encoder_2,
392
+ text_encoder_3=text_encoder_3,
393
+ tokenizer=tokenizer,
394
+ tokenizer_2=tokenizer_2,
395
+ tokenizer_3=tokenizer_3,
396
+ transformer=transformer,
397
+ scheduler=scheduler,
398
+ )
399
+ self.vae_scale_factor = (
400
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
401
+ )
402
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
403
+ self.tokenizer_max_length = (
404
+ self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
405
+ )
406
+ self.default_sample_size = (
407
+ self.transformer.config.sample_size
408
+ if hasattr(self, "transformer") and self.transformer is not None
409
+ else 128
410
+ )
411
+
412
+ def _get_t5_prompt_embeds(
413
+ self,
414
+ prompt: Union[str, List[str]] = None,
415
+ num_images_per_prompt: int = 1,
416
+ max_sequence_length: int = 256,
417
+ device: Optional[torch.device] = None,
418
+ dtype: Optional[torch.dtype] = None,
419
+ ):
420
+ device = device or self._execution_device
421
+ dtype = dtype or self.text_encoder.dtype
422
+
423
+ prompt = [prompt] if isinstance(prompt, str) else prompt
424
+ batch_size = len(prompt)
425
+
426
+ if self.text_encoder_3 is None:
427
+ return torch.zeros(
428
+ (
429
+ batch_size * num_images_per_prompt,
430
+ self.tokenizer_max_length,
431
+ self.transformer.config.joint_attention_dim,
432
+ ),
433
+ device=device,
434
+ dtype=dtype,
435
+ )
436
+
437
+ text_inputs = self.tokenizer_3(
438
+ prompt,
439
+ padding="max_length",
440
+ max_length=max_sequence_length,
441
+ truncation=True,
442
+ add_special_tokens=True,
443
+ return_tensors="pt",
444
+ )
445
+ text_input_ids = text_inputs.input_ids
446
+ untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids
447
+
448
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
449
+ removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
450
+ logger.warning(
451
+ "The following part of your input was truncated because `max_sequence_length` is set to "
452
+ f" {max_sequence_length} tokens: {removed_text}"
453
+ )
454
+
455
+ prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
456
+
457
+ dtype = self.text_encoder_3.dtype
458
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
459
+
460
+ _, seq_len, _ = prompt_embeds.shape
461
+
462
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
463
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
464
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
465
+
466
+ return prompt_embeds
467
+
468
+ def _get_clip_prompt_embeds(
469
+ self,
470
+ prompt: Union[str, List[str]],
471
+ num_images_per_prompt: int = 1,
472
+ device: Optional[torch.device] = None,
473
+ clip_skip: Optional[int] = None,
474
+ clip_model_index: int = 0,
475
+ ):
476
+ device = device or self._execution_device
477
+
478
+ clip_tokenizers = [self.tokenizer, self.tokenizer_2]
479
+ clip_text_encoders = [self.text_encoder, self.text_encoder_2]
480
+
481
+ tokenizer = clip_tokenizers[clip_model_index]
482
+ text_encoder = clip_text_encoders[clip_model_index]
483
+
484
+ prompt = [prompt] if isinstance(prompt, str) else prompt
485
+ batch_size = len(prompt)
486
+
487
+ text_inputs = tokenizer(
488
+ prompt,
489
+ padding="max_length",
490
+ max_length=self.tokenizer_max_length,
491
+ truncation=True,
492
+ return_tensors="pt",
493
+ )
494
+
495
+ text_input_ids = text_inputs.input_ids
496
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
497
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
498
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
499
+ logger.warning(
500
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
501
+ f" {self.tokenizer_max_length} tokens: {removed_text}"
502
+ )
503
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
504
+ pooled_prompt_embeds = prompt_embeds[0]
505
+
506
+ if clip_skip is None:
507
+ prompt_embeds = prompt_embeds.hidden_states[-2]
508
+ else:
509
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
510
+
511
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
512
+
513
+ _, seq_len, _ = prompt_embeds.shape
514
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
515
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
516
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
517
+
518
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1)
519
+ pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1)
520
+
521
+ return prompt_embeds, pooled_prompt_embeds
522
+
523
+ def encode_prompt(
524
+ self,
525
+ prompt: Union[str, List[str]],
526
+ prompt_2: Union[str, List[str]],
527
+ prompt_3: Union[str, List[str]],
528
+ device: Optional[torch.device] = None,
529
+ num_images_per_prompt: int = 1,
530
+ do_classifier_free_guidance: bool = True,
531
+ negative_prompt: Optional[Union[str, List[str]]] = None,
532
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
533
+ negative_prompt_3: Optional[Union[str, List[str]]] = None,
534
+ prompt_embeds: Optional[torch.FloatTensor] = None,
535
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
536
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
537
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
538
+ clip_skip: Optional[int] = None,
539
+ max_sequence_length: int = 256,
540
+ lora_scale: Optional[float] = None,
541
+ ):
542
+ r"""
543
+
544
+ Args:
545
+ prompt (`str` or `List[str]`, *optional*):
546
+ prompt to be encoded
547
+ prompt_2 (`str` or `List[str]`, *optional*):
548
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
549
+ used in all text-encoders
550
+ prompt_3 (`str` or `List[str]`, *optional*):
551
+ The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is
552
+ used in all text-encoders
553
+ device: (`torch.device`):
554
+ torch device
555
+ num_images_per_prompt (`int`):
556
+ number of images that should be generated per prompt
557
+ do_classifier_free_guidance (`bool`):
558
+ whether to use classifier free guidance or not
559
+ negative_prompt (`str` or `List[str]`, *optional*):
560
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
561
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
562
+ less than `1`).
563
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
564
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
565
+ `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
566
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
567
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
568
+ `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
569
+ prompt_embeds (`torch.FloatTensor`, *optional*):
570
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
571
+ provided, text embeddings will be generated from `prompt` input argument.
572
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
573
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
574
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
575
+ argument.
576
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
577
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
578
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
579
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
580
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
581
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
582
+ input argument.
583
+ clip_skip (`int`, *optional*):
584
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
585
+ the output of the pre-final layer will be used for computing the prompt embeddings.
586
+ lora_scale (`float`, *optional*):
587
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
588
+ """
589
+ device = device or self._execution_device
590
+
591
+ # set lora scale so that monkey patched LoRA
592
+ # function of text encoder can correctly access it
593
+ if lora_scale is not None and isinstance(self, SD3LoraLoaderMixin):
594
+ self._lora_scale = lora_scale
595
+
596
+ # dynamically adjust the LoRA scale
597
+ if self.text_encoder is not None and USE_PEFT_BACKEND:
598
+ scale_lora_layers(self.text_encoder, lora_scale)
599
+ if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
600
+ scale_lora_layers(self.text_encoder_2, lora_scale)
601
+
602
+ prompt = [prompt] if isinstance(prompt, str) else prompt
603
+ if prompt is not None:
604
+ batch_size = len(prompt)
605
+ else:
606
+ batch_size = prompt_embeds.shape[0]
607
+
608
+ if prompt_embeds is None:
609
+ prompt_2 = prompt_2 or prompt
610
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
611
+
612
+ prompt_3 = prompt_3 or prompt
613
+ prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3
614
+
615
+ prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds(
616
+ prompt=prompt,
617
+ device=device,
618
+ num_images_per_prompt=num_images_per_prompt,
619
+ clip_skip=clip_skip,
620
+ clip_model_index=0,
621
+ )
622
+ prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds(
623
+ prompt=prompt_2,
624
+ device=device,
625
+ num_images_per_prompt=num_images_per_prompt,
626
+ clip_skip=clip_skip,
627
+ clip_model_index=1,
628
+ )
629
+ clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1)
630
+
631
+ t5_prompt_embed = self._get_t5_prompt_embeds(
632
+ prompt=prompt_3,
633
+ num_images_per_prompt=num_images_per_prompt,
634
+ max_sequence_length=max_sequence_length,
635
+ device=device,
636
+ )
637
+
638
+ clip_prompt_embeds = torch.nn.functional.pad(
639
+ clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])
640
+ )
641
+
642
+ prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2)
643
+ pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1)
644
+
645
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
646
+ negative_prompt = negative_prompt or ""
647
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
648
+ negative_prompt_3 = negative_prompt_3 or negative_prompt
649
+
650
+ # normalize str to list
651
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
652
+ negative_prompt_2 = (
653
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
654
+ )
655
+ negative_prompt_3 = (
656
+ batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3
657
+ )
658
+
659
+ if prompt is not None and type(prompt) is not type(negative_prompt):
660
+ raise TypeError(
661
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
662
+ f" {type(prompt)}."
663
+ )
664
+ elif batch_size != len(negative_prompt):
665
+ raise ValueError(
666
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
667
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
668
+ " the batch size of `prompt`."
669
+ )
670
+
671
+ negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds(
672
+ negative_prompt,
673
+ device=device,
674
+ num_images_per_prompt=num_images_per_prompt,
675
+ clip_skip=None,
676
+ clip_model_index=0,
677
+ )
678
+ negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds(
679
+ negative_prompt_2,
680
+ device=device,
681
+ num_images_per_prompt=num_images_per_prompt,
682
+ clip_skip=None,
683
+ clip_model_index=1,
684
+ )
685
+ negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
686
+
687
+ t5_negative_prompt_embed = self._get_t5_prompt_embeds(
688
+ prompt=negative_prompt_3,
689
+ num_images_per_prompt=num_images_per_prompt,
690
+ max_sequence_length=max_sequence_length,
691
+ device=device,
692
+ )
693
+
694
+ negative_clip_prompt_embeds = torch.nn.functional.pad(
695
+ negative_clip_prompt_embeds,
696
+ (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]),
697
+ )
698
+
699
+ negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2)
700
+ negative_pooled_prompt_embeds = torch.cat(
701
+ [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1
702
+ )
703
+
704
+ if self.text_encoder is not None:
705
+ if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
706
+ # Retrieve the original scale by scaling back the LoRA layers
707
+ unscale_lora_layers(self.text_encoder, lora_scale)
708
+
709
+ if self.text_encoder_2 is not None:
710
+ if isinstance(self, SD3LoraLoaderMixin) and USE_PEFT_BACKEND:
711
+ # Retrieve the original scale by scaling back the LoRA layers
712
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
713
+
714
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
715
+
716
+ def check_inputs(
717
+ self,
718
+ prompt,
719
+ prompt_2,
720
+ prompt_3,
721
+ height,
722
+ width,
723
+ negative_prompt=None,
724
+ negative_prompt_2=None,
725
+ negative_prompt_3=None,
726
+ prompt_embeds=None,
727
+ negative_prompt_embeds=None,
728
+ pooled_prompt_embeds=None,
729
+ negative_pooled_prompt_embeds=None,
730
+ callback_on_step_end_tensor_inputs=None,
731
+ max_sequence_length=None,
732
+ ):
733
+ if height % 8 != 0 or width % 8 != 0:
734
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
735
+
736
+ if callback_on_step_end_tensor_inputs is not None and not all(
737
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
738
+ ):
739
+ raise ValueError(
740
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
741
+ )
742
+
743
+ if prompt is not None and prompt_embeds is not None:
744
+ raise ValueError(
745
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
746
+ " only forward one of the two."
747
+ )
748
+ elif prompt_2 is not None and prompt_embeds is not None:
749
+ raise ValueError(
750
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
751
+ " only forward one of the two."
752
+ )
753
+ elif prompt_3 is not None and prompt_embeds is not None:
754
+ raise ValueError(
755
+ f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
756
+ " only forward one of the two."
757
+ )
758
+ elif prompt is None and prompt_embeds is None:
759
+ raise ValueError(
760
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
761
+ )
762
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
763
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
764
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
765
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
766
+ elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)):
767
+ raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}")
768
+
769
+ if negative_prompt is not None and negative_prompt_embeds is not None:
770
+ raise ValueError(
771
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
772
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
773
+ )
774
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
775
+ raise ValueError(
776
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
777
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
778
+ )
779
+ elif negative_prompt_3 is not None and negative_prompt_embeds is not None:
780
+ raise ValueError(
781
+ f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:"
782
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
783
+ )
784
+
785
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
786
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
787
+ raise ValueError(
788
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
789
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
790
+ f" {negative_prompt_embeds.shape}."
791
+ )
792
+
793
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
794
+ raise ValueError(
795
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
796
+ )
797
+
798
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
799
+ raise ValueError(
800
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
801
+ )
802
+
803
+ if max_sequence_length is not None and max_sequence_length > 512:
804
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
805
+
806
+ def prepare_latents(
807
+ self,
808
+ batch_size,
809
+ num_channels_latents,
810
+ height,
811
+ width,
812
+ dtype,
813
+ device,
814
+ generator,
815
+ latents=None,
816
+ ):
817
+ if latents is not None:
818
+ return latents.to(device=device, dtype=dtype)
819
+
820
+ shape = (
821
+ batch_size,
822
+ num_channels_latents,
823
+ int(height) // self.vae_scale_factor,
824
+ int(width) // self.vae_scale_factor,
825
+ )
826
+
827
+ if isinstance(generator, list) and len(generator) != batch_size:
828
+ raise ValueError(
829
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
830
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
831
+ )
832
+
833
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
834
+
835
+ return latents
836
+
837
+ @property
838
+ def guidance_scale(self):
839
+ return self._guidance_scale
840
+
841
+ @property
842
+ def clip_skip(self):
843
+ return self._clip_skip
844
+
845
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
846
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
847
+ # corresponds to doing no classifier free guidance.
848
+ @property
849
+ def do_classifier_free_guidance(self):
850
+ return self._guidance_scale > 1
851
+
852
+ @property
853
+ def joint_attention_kwargs(self):
854
+ return self._joint_attention_kwargs
855
+
856
+ @property
857
+ def num_timesteps(self):
858
+ return self._num_timesteps
859
+
860
+ @property
861
+ def interrupt(self):
862
+ return self._interrupt
863
+
864
+
865
+ @torch.inference_mode()
866
+ def init_ipadapter(self, ip_adapter_path, image_encoder_path, nb_token, output_dim=2432):
867
+ from transformers import SiglipVisionModel, SiglipImageProcessor
868
+ state_dict = torch.load(ip_adapter_path, map_location="cpu")
869
+
870
+ device, dtype = self.transformer.device, self.transformer.dtype
871
+ image_encoder = SiglipVisionModel.from_pretrained(image_encoder_path)
872
+ image_processor = SiglipImageProcessor.from_pretrained(image_encoder_path)
873
+ image_encoder.eval()
874
+ image_encoder.to(device, dtype=dtype)
875
+ self.image_encoder = image_encoder
876
+ self.clip_image_processor = image_processor
877
+
878
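+ # the TimeResampler maps SigLIP image features (width 1152) to `nb_token` image-prompt
+ # tokens of width `output_dim`, conditioned on the diffusion timestep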
+ sample_class = TimeResampler
879
+ image_proj_model = sample_class(
880
+ dim=1280,
881
+ depth=4,
882
+ dim_head=64,
883
+ heads=20,
884
+ num_queries=nb_token,
885
+ embedding_dim=1152,
886
+ output_dim=output_dim,
887
+ ff_mult=4,
888
+ timestep_in_dim=320,
889
+ timestep_flip_sin_to_cos=True,
890
+ timestep_freq_shift=0,
891
+ )
892
+ image_proj_model.eval()
893
+ image_proj_model.to(device, dtype=dtype)
894
+ key_name = image_proj_model.load_state_dict(state_dict["image_proj"], strict=False)
895
+ print(f"=> loading image_proj_model: {key_name}")
896
+
897
+ self.image_proj_model = image_proj_model
898
+
899
+
900
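+ # swap every attention processor in the transformer for a JointIPAttnProcessor,
+ # then load the IP-Adapter weights into the newly added layers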
+ attn_procs = {}
901
+ transformer = self.transformer
902
+ for idx_name, name in enumerate(transformer.attn_processors.keys()):
903
+ hidden_size = transformer.config.attention_head_dim * transformer.config.num_attention_heads
904
+ ip_hidden_states_dim = transformer.config.attention_head_dim * transformer.config.num_attention_heads
905
+ ip_encoder_hidden_states_dim = transformer.config.caption_projection_dim
906
+
907
+ attn_procs[name] = JointIPAttnProcessor(
908
+ hidden_size=hidden_size,
909
+ cross_attention_dim=transformer.config.caption_projection_dim,
910
+ ip_hidden_states_dim=ip_hidden_states_dim,
911
+ ip_encoder_hidden_states_dim=ip_encoder_hidden_states_dim,
912
+ head_dim=transformer.config.attention_head_dim,
913
+ timesteps_emb_dim=1280,
914
+ ).to(device, dtype=dtype)
915
+
916
+ self.transformer.set_attn_processor(attn_procs)
917
+ tmp_ip_layers = torch.nn.ModuleList(self.transformer.attn_processors.values())
918
+
919
+ key_name = tmp_ip_layers.load_state_dict(state_dict["ip_adapter"], strict=False)
920
+ print(f"=> loading ip_adapter: {key_name}")
921
+
922
+
923
+ @torch.inference_mode()
924
+ def encode_clip_image_emb(self, clip_image, device, dtype):
925
+
926
+ # clip
927
+ clip_image_tensor = self.clip_image_processor(images=clip_image, return_tensors="pt").pixel_values
928
+ clip_image_tensor = clip_image_tensor.to(device, dtype=dtype)
929
+ clip_image_embeds = self.image_encoder(clip_image_tensor, output_hidden_states=True).hidden_states[-2]
930
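+ # prepend an all-zero image embedding as the unconditional branch for classifier-free guidance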
+ clip_image_embeds = torch.cat([torch.zeros_like(clip_image_embeds), clip_image_embeds], dim=0)
931
+
932
+ return clip_image_embeds
933
+
934
+
935
+
936
+ @torch.no_grad()
937
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
938
+ def __call__(
939
+ self,
940
+ prompt: Union[str, List[str]] = None,
941
+ prompt_2: Optional[Union[str, List[str]]] = None,
942
+ prompt_3: Optional[Union[str, List[str]]] = None,
943
+ height: Optional[int] = None,
944
+ width: Optional[int] = None,
945
+ num_inference_steps: int = 28,
946
+ timesteps: List[int] = None,
947
+ guidance_scale: float = 7.0,
948
+ negative_prompt: Optional[Union[str, List[str]]] = None,
949
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
950
+ negative_prompt_3: Optional[Union[str, List[str]]] = None,
951
+ num_images_per_prompt: Optional[int] = 1,
952
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
953
+ latents: Optional[torch.FloatTensor] = None,
954
+ prompt_embeds: Optional[torch.FloatTensor] = None,
955
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
956
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
957
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
958
+ output_type: Optional[str] = "pil",
959
+ return_dict: bool = True,
960
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
961
+ clip_skip: Optional[int] = None,
962
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
963
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
964
+ max_sequence_length: int = 256,
965
+
966
+ # ipa
967
+ clip_image=None,
968
+ ipadapter_scale=1.0,
969
+ ):
970
+ r"""
971
+ Function invoked when calling the pipeline for generation.
972
+
973
+ Args:
974
+ prompt (`str` or `List[str]`, *optional*):
975
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
976
+ instead.
977
+ prompt_2 (`str` or `List[str]`, *optional*):
978
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
979
+ will be used instead.
980
+ prompt_3 (`str` or `List[str]`, *optional*):
981
+ The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt`
982
+ will be used instead.
983
+ height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
984
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
985
+ width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
986
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
987
+ num_inference_steps (`int`, *optional*, defaults to 28):
988
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
989
+ expense of slower inference.
990
+ timesteps (`List[int]`, *optional*):
991
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
992
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
993
+ passed will be used. Must be in descending order.
994
+ guidance_scale (`float`, *optional*, defaults to 7.0):
995
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
996
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
997
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
998
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
999
+ usually at the expense of lower image quality.
1000
+ negative_prompt (`str` or `List[str]`, *optional*):
1001
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
1002
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
1003
+ less than `1`).
1004
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
1005
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
1006
+ `text_encoder_2`. If not defined, `negative_prompt` is used instead
1007
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
1008
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
1009
+ `text_encoder_3`. If not defined, `negative_prompt` is used instead
1010
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
1011
+ The number of images to generate per prompt.
1012
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
1013
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
1014
+ to make generation deterministic.
1015
+ latents (`torch.FloatTensor`, *optional*):
1016
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
1017
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
1018
+ tensor will be generated by sampling using the supplied random `generator`.
1019
+ prompt_embeds (`torch.FloatTensor`, *optional*):
1020
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
1021
+ provided, text embeddings will be generated from `prompt` input argument.
1022
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
1023
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1024
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
1025
+ argument.
1026
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1027
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
1028
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
1029
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
1030
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
1031
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
1032
+ input argument.
1033
+ output_type (`str`, *optional*, defaults to `"pil"`):
1034
+ The output format of the generated image. Choose between
1035
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
1036
+ return_dict (`bool`, *optional*, defaults to `True`):
1037
+ Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
1038
+ of a plain tuple.
1039
+ joint_attention_kwargs (`dict`, *optional*):
1040
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1041
+ `self.processor` in
1042
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1043
+ callback_on_step_end (`Callable`, *optional*):
1044
+ A function that is called at the end of each denoising step during inference. The function is called
1045
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
1046
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
1047
+ `callback_on_step_end_tensor_inputs`.
1048
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
1049
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
1050
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
1051
+ `._callback_tensor_inputs` attribute of your pipeline class.
1052
+ max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`.
1053
+
1054
+ Examples:
1055
+
1056
+ Returns:
1057
+ [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`:
1058
+ [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a
1059
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
1060
+ """
1061
+
1062
+ height = height or self.default_sample_size * self.vae_scale_factor
1063
+ width = width or self.default_sample_size * self.vae_scale_factor
1064
+
1065
+ # 1. Check inputs. Raise error if not correct
1066
+ self.check_inputs(
1067
+ prompt,
1068
+ prompt_2,
1069
+ prompt_3,
1070
+ height,
1071
+ width,
1072
+ negative_prompt=negative_prompt,
1073
+ negative_prompt_2=negative_prompt_2,
1074
+ negative_prompt_3=negative_prompt_3,
1075
+ prompt_embeds=prompt_embeds,
1076
+ negative_prompt_embeds=negative_prompt_embeds,
1077
+ pooled_prompt_embeds=pooled_prompt_embeds,
1078
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1079
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
1080
+ max_sequence_length=max_sequence_length,
1081
+ )
1082
+
1083
+ self._guidance_scale = guidance_scale
1084
+ self._clip_skip = clip_skip
1085
+ self._joint_attention_kwargs = joint_attention_kwargs
1086
+ self._interrupt = False
1087
+
1088
+ # 2. Define call parameters
1089
+ if prompt is not None and isinstance(prompt, str):
1090
+ batch_size = 1
1091
+ elif prompt is not None and isinstance(prompt, list):
1092
+ batch_size = len(prompt)
1093
+ else:
1094
+ batch_size = prompt_embeds.shape[0]
1095
+
1096
+ device = self._execution_device
1097
+ dtype = self.transformer.dtype
1098
+
1099
+ lora_scale = (
1100
+ self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
1101
+ )
1102
+ (
1103
+ prompt_embeds,
1104
+ negative_prompt_embeds,
1105
+ pooled_prompt_embeds,
1106
+ negative_pooled_prompt_embeds,
1107
+ ) = self.encode_prompt(
1108
+ prompt=prompt,
1109
+ prompt_2=prompt_2,
1110
+ prompt_3=prompt_3,
1111
+ negative_prompt=negative_prompt,
1112
+ negative_prompt_2=negative_prompt_2,
1113
+ negative_prompt_3=negative_prompt_3,
1114
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
1115
+ prompt_embeds=prompt_embeds,
1116
+ negative_prompt_embeds=negative_prompt_embeds,
1117
+ pooled_prompt_embeds=pooled_prompt_embeds,
1118
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
1119
+ device=device,
1120
+ clip_skip=self.clip_skip,
1121
+ num_images_per_prompt=num_images_per_prompt,
1122
+ max_sequence_length=max_sequence_length,
1123
+ lora_scale=lora_scale,
1124
+ )
1125
+
1126
+ if self.do_classifier_free_guidance:
1127
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
1128
+ pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
1129
+
1130
+ # 3. Prepare the IP-Adapter image conditioning: square the reference image, then encode it with the SigLIP image encoder
1131
+ clip_image = clip_image.resize((max(clip_image.size), max(clip_image.size)))
1132
+ clip_image_embeds = self.encode_clip_image_emb(clip_image, device, dtype)
1133
+
1134
+ # 4. Prepare timesteps
1135
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
1136
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1137
+ self._num_timesteps = len(timesteps)
1138
+
1139
+ # 5. Prepare latent variables
1140
+ num_channels_latents = self.transformer.config.in_channels
1141
+ latents = self.prepare_latents(
1142
+ batch_size * num_images_per_prompt,
1143
+ num_channels_latents,
1144
+ height,
1145
+ width,
1146
+ prompt_embeds.dtype,
1147
+ device,
1148
+ generator,
1149
+ latents,
1150
+ )
1151
+
1152
+ # 6. Denoising loop
1153
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1154
+ for i, t in enumerate(timesteps):
1155
+ if self.interrupt:
1156
+ continue
1157
+
1158
+ # expand the latents if we are doing classifier free guidance
1159
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1160
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
1161
+ timestep = t.expand(latent_model_input.shape[0])
1162
+
1163
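+ # resample the image embeddings into timestep-conditioned IP tokens; they reach the
+ # attention processors through `joint_attention_kwargs` below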
+ image_prompt_embeds, timestep_emb = self.image_proj_model(
1164
+ clip_image_embeds,
1165
+ timestep.to(dtype=latents.dtype),
1166
+ need_temb=True
1167
+ )
1168
+
1169
+ joint_attention_kwargs = dict(
1170
+ emb_dict=dict(
1171
+ ip_hidden_states=image_prompt_embeds,
1172
+ temb=timestep_emb,
1173
+ scale=ipadapter_scale,
1174
+ )
1175
+ )
1176
+
1177
+ noise_pred = self.transformer(
1178
+ hidden_states=latent_model_input,
1179
+ timestep=timestep,
1180
+ encoder_hidden_states=prompt_embeds,
1181
+ pooled_projections=pooled_prompt_embeds,
1182
+ joint_attention_kwargs=joint_attention_kwargs,
1183
+ return_dict=False,
1184
+ )[0]
1185
+
1186
+ # perform guidance
1187
+ if self.do_classifier_free_guidance:
1188
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1189
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
1190
+
1191
+ # compute the previous noisy sample x_t -> x_t-1
1192
+ latents_dtype = latents.dtype
1193
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
1194
+
1195
+ if latents.dtype != latents_dtype:
1196
+ if torch.backends.mps.is_available():
1197
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
1198
+ latents = latents.to(latents_dtype)
1199
+
1200
+ if callback_on_step_end is not None:
1201
+ callback_kwargs = {}
1202
+ for k in callback_on_step_end_tensor_inputs:
1203
+ callback_kwargs[k] = locals()[k]
1204
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1205
+
1206
+ latents = callback_outputs.pop("latents", latents)
1207
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1208
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1209
+ negative_pooled_prompt_embeds = callback_outputs.pop(
1210
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
1211
+ )
1212
+
1213
+ # call the callback, if provided
1214
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1215
+ progress_bar.update()
1216
+
1217
+ if XLA_AVAILABLE:
1218
+ xm.mark_step()
1219
+
1220
+ if output_type == "latent":
1221
+ image = latents
1222
+
1223
+ else:
1224
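+ # undo the SD3 latent normalization (scale, then shift) before decoding with the VAE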
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
1225
+
1226
+ image = self.vae.decode(latents, return_dict=False)[0]
1227
+ image = self.image_processor.postprocess(image, output_type=output_type)
1228
+
1229
+ # Offload all models
1230
+ self.maybe_free_model_hooks()
1231
+
1232
+ if not return_dict:
1233
+ return (image,)
1234
+
1235
+ return StableDiffusion3PipelineOutput(images=image)