skytnt committed on
Commit
fca2f57
1 Parent(s): 099cac4

Update pipeline.py

Browse files
Files changed (1) hide show
  1. pipeline.py +6 -6
pipeline.py CHANGED
@@ -472,7 +472,7 @@ class StableDiffusionLongPromptPipeline(DiffusionPipeline):
472
  eta: float = 0.0,
473
  generator: Optional[torch.Generator] = None,
474
  latents: Optional[torch.FloatTensor] = None,
475
- max_embeddings_multiples: Optional[int] = 1,
476
  output_type: Optional[str] = "pil",
477
  return_dict: bool = True,
478
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -513,7 +513,7 @@ class StableDiffusionLongPromptPipeline(DiffusionPipeline):
513
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
514
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
515
  tensor will be generated by sampling using the supplied random `generator`.
516
- max_embeddings_multiples (`int`, *optional*, defaults to `1`):
517
  The max multiple length of prompt embeddings compared to the max output length of text encoder.
518
  output_type (`str`, *optional*, defaults to `"pil"`):
519
  The output format of the generated image. Choose between
@@ -684,7 +684,7 @@ class StableDiffusionLongPromptPipeline(DiffusionPipeline):
684
  num_images_per_prompt: Optional[int] = 1,
685
  eta: Optional[float] = 0.0,
686
  generator: Optional[torch.Generator] = None,
687
- max_embeddings_multiples: Optional[int] = 1,
688
  output_type: Optional[str] = "pil",
689
  return_dict: bool = True,
690
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -726,7 +726,7 @@ class StableDiffusionLongPromptPipeline(DiffusionPipeline):
726
  generator (`torch.Generator`, *optional*):
727
  A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
728
  deterministic.
729
- max_embeddings_multiples (`int`, *optional*, defaults to `1`):
730
  The max multiple length of prompt embeddings compared to the max output length of text encoder.
731
  output_type (`str`, *optional*, defaults to `"pil"`):
732
  The output format of the generated image. Choose between
@@ -914,7 +914,7 @@ class StableDiffusionLongPromptPipeline(DiffusionPipeline):
914
  num_images_per_prompt: Optional[int] = 1,
915
  eta: Optional[float] = 0.0,
916
  generator: Optional[torch.Generator] = None,
917
- max_embeddings_multiples: Optional[int] = 1,
918
  output_type: Optional[str] = "pil",
919
  return_dict: bool = True,
920
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
@@ -960,7 +960,7 @@ class StableDiffusionLongPromptPipeline(DiffusionPipeline):
960
  generator (`torch.Generator`, *optional*):
961
  A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
962
  deterministic.
963
- max_embeddings_multiples (`int`, *optional*, defaults to `1`):
964
  The max multiple length of prompt embeddings compared to the max output length of text encoder.
965
  output_type (`str`, *optional*, defaults to `"pil"`):
966
  The output format of the generated image. Choose between
 
472
  eta: float = 0.0,
473
  generator: Optional[torch.Generator] = None,
474
  latents: Optional[torch.FloatTensor] = None,
475
+ max_embeddings_multiples: Optional[int] = 3,
476
  output_type: Optional[str] = "pil",
477
  return_dict: bool = True,
478
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
 
513
  Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
514
  generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
515
  tensor will be generated by sampling using the supplied random `generator`.
516
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
517
  The max multiple length of prompt embeddings compared to the max output length of text encoder.
518
  output_type (`str`, *optional*, defaults to `"pil"`):
519
  The output format of the generated image. Choose between
 
684
  num_images_per_prompt: Optional[int] = 1,
685
  eta: Optional[float] = 0.0,
686
  generator: Optional[torch.Generator] = None,
687
+ max_embeddings_multiples: Optional[int] = 3,
688
  output_type: Optional[str] = "pil",
689
  return_dict: bool = True,
690
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
 
726
  generator (`torch.Generator`, *optional*):
727
  A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
728
  deterministic.
729
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
730
  The max multiple length of prompt embeddings compared to the max output length of text encoder.
731
  output_type (`str`, *optional*, defaults to `"pil"`):
732
  The output format of the generated image. Choose between
 
914
  num_images_per_prompt: Optional[int] = 1,
915
  eta: Optional[float] = 0.0,
916
  generator: Optional[torch.Generator] = None,
917
+ max_embeddings_multiples: Optional[int] = 3,
918
  output_type: Optional[str] = "pil",
919
  return_dict: bool = True,
920
  callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
 
960
  generator (`torch.Generator`, *optional*):
961
  A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
962
  deterministic.
963
+ max_embeddings_multiples (`int`, *optional*, defaults to `3`):
964
  The max multiple length of prompt embeddings compared to the max output length of text encoder.
965
  output_type (`str`, *optional*, defaults to `"pil"`):
966
  The output format of the generated image. Choose between