AlekseyCalvin committed on
Commit
a67d007
1 Parent(s): 0ca490b

Update pipeline.py

Browse files
Files changed (1) hide show
  1. pipeline.py +1 -31
pipeline.py CHANGED
@@ -67,39 +67,9 @@ def prepare_timesteps(
67
 
68
  # FLUX pipeline function
69
  class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
70
-
71
- r"""
72
- The Flux pipeline for text-to-image generation.
73
-
74
- Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
75
-
76
- Args:
77
- transformer ([`FluxTransformer2DModel`]):
78
- Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
79
- scheduler ([`FlowMatchEulerDiscreteScheduler`]):
80
- A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
81
- vae ([`AutoencoderKL`]):
82
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
83
- text_encoder ([`CLIPTextModel`]):
84
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
85
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
86
- text_encoder_2 ([`T5EncoderModel`]):
87
- [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
88
- the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
89
- tokenizer (`CLIPTokenizer`):
90
- Tokenizer of class
91
- [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
92
- tokenizer_2 (`T5TokenizerFast`):
93
- Second Tokenizer of class
94
- [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
95
- """
96
-
97
- model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
98
- _optional_components = []
99
- _callback_tensor_inputs = ["latents", "prompt_embeds"] model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
100
  _optional_components = []
101
  _callback_tensor_inputs = ["latents", "prompt_embeds"]
102
-
103
  def __init__(
104
  self,
105
  scheduler: FlowMatchEulerDiscreteScheduler,
 
67
 
68
  # FLUX pipeline function
69
  class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  _optional_components = []
71
  _callback_tensor_inputs = ["latents", "prompt_embeds"]
72
+
73
  def __init__(
74
  self,
75
  scheduler: FlowMatchEulerDiscreteScheduler,