Update implementation so that it actually uses the custom classes #3
by sayakpaul

my_pipeline.py CHANGED (+5 -4)
@@ -24,7 +24,9 @@ from diffusers.loaders import (
     StableDiffusionXLLoraLoaderMixin,
     TextualInversionLoaderMixin,
 )
-from diffusers.models import AutoencoderKL
+from diffusers.models import AutoencoderKL
+from .scheduler.my_scheduler import MyScheduler
+from .unet.my_unet_model import MyUNetModel
 from diffusers.models.attention_processor import (
     AttnProcessor2_0,
     LoRAAttnProcessor2_0,
@@ -32,7 +34,6 @@ from diffusers.models.attention_processor import (
     XFormersAttnProcessor,
 )
 from diffusers.models.lora import adjust_lora_scale_text_encoder
-from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
     USE_PEFT_BACKEND,
     is_invisible_watermark_available,
@@ -145,8 +146,8 @@ class MyPipeline(
         text_encoder_2: CLIPTextModelWithProjection,
         tokenizer: CLIPTokenizer,
         tokenizer_2: CLIPTokenizer,
-        unet:
-        scheduler:
+        unet: MyUNetModel,
+        scheduler: MyScheduler,
         force_zeros_for_empty_prompt: bool = True,
         add_watermarker: Optional[bool] = None,
     ):
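For context, the relative imports added above (from .scheduler.my_scheduler import MyScheduler and from .unet.my_unet_model import MyUNetModel) assume the custom components live next to my_pipeline.py in the repository, i.e. under scheduler/my_scheduler.py and unet/my_unet_model.py. Below is a minimal, hedged sketch of how a pipeline that ships custom code like this could be loaded from the Hub; the repository id is a placeholder, and trust_remote_code requires a diffusers version that supports loading custom components from the Hub.

# Sketch only: loading a Hub repo that ships my_pipeline.py plus custom classes.
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "<your-username>/<repo-with-my_pipeline>",  # hypothetical repo id, replace with the real one
    trust_remote_code=True,  # opt in to executing the repo's Python files (MyPipeline, MyUNetModel, MyScheduler)
    torch_dtype=torch.float16,
)
# MyPipeline follows the SDXL-style call signature used by its mixins, so a plain
# text-to-image call should work; adjust arguments to whatever the pipeline exposes.
image = pipeline(prompt="a photo of an astronaut riding a horse").images[0]

The trust_remote_code flag mirrors the transformers convention: code fetched from the Hub is only executed when the caller explicitly opts in, so without it the custom MyUNetModel and MyScheduler classes referenced by this change would not be importable at load time.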