Spaces: Running on Zero
AlekseyCalvin committed
Commit • 01c68ec
1 Parent(s): 3f015ce
Update pipeline.py
pipeline.py +0 -14
pipeline.py CHANGED

@@ -336,19 +336,11 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
         negative_prompt_embeds=None,
         pooled_prompt_embeds=None,
         negative_pooled_prompt_embeds=None,
-        callback_on_step_end_tensor_inputs=None,
         max_sequence_length=None,
     ):
         if height % 8 != 0 or width % 8 != 0:
             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
 
-        if callback_on_step_end_tensor_inputs is not None and not all(
-            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
-        ):
-            raise ValueError(
-                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
-            )
-
         if prompt is not None and prompt_embeds is not None:
             raise ValueError(
                 f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
@@ -547,8 +539,6 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
-        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
-        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
         clip_skip: Optional[int] = None,
         max_sequence_length: int = 300,
         **kwargs,
@@ -568,7 +558,6 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
             negative_prompt_embeds=negative_prompt_embeds,
             pooled_prompt_embeds=pooled_prompt_embeds,
             negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
             max_sequence_length=max_sequence_length,
         )
 
@@ -734,8 +723,6 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
-        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
-        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
         clip_skip: Optional[int] = None,
         max_sequence_length: int = 300,
         **kwargs,
@@ -755,7 +742,6 @@ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFile
             negative_prompt_embeds=negative_prompt_embeds,
             pooled_prompt_embeds=pooled_prompt_embeds,
             negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
-            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
             max_sequence_length=max_sequence_length,
         )
 
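The net effect of the commit is that FluxWithCFGPipeline no longer exposes step-end callbacks: `callback_on_step_end` and `callback_on_step_end_tensor_inputs` are removed from both generation entry-point signatures and from the `check_inputs` validation. The sketch below shows what a call looks like after this change; it is a minimal, hypothetical usage example, not the Space's actual app code. The checkpoint id, the generation settings, and the FluxPipelineOutput-style `.images` attribute are assumptions based on the usual diffusers Flux calling convention.

# Minimal usage sketch after this commit.
# Assumptions: FluxWithCFGPipeline is importable from pipeline.py, follows the standard
# diffusers Flux interface, and the checkpoint id / settings below are illustrative only.
import torch
from pipeline import FluxWithCFGPipeline

pipe = FluxWithCFGPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",  # assumed base checkpoint, not from the Space
    torch_dtype=torch.bfloat16,
).to("cuda")

# callback_on_step_end / callback_on_step_end_tensor_inputs are no longer declared
# parameters; since the signature still ends in **kwargs, forwarding them would now
# simply land in kwargs (and, as far as this diff shows, go unused) instead of being
# validated against self._callback_tensor_inputs.
result = pipe(
    prompt="a watercolor painting of a lighthouse at dusk",
    height=1024,
    width=1024,
    num_inference_steps=8,      # assumed parameter name from the standard Flux interface
    max_sequence_length=300,
)
result.images[0].save("out.png")

Callers that still need per-step hooks would have to wrap the pipeline externally, since the removed tensor-input check and callback plumbing are no longer available inside the class.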