zhiweili committed
Commit b611dc3
1 Parent(s): ff70f49

add _default_height_width

pipelines/pipeline_sdxl_adapter_inpaint.py CHANGED
@@ -1069,6 +1069,35 @@ class StableDiffusionXLInpaintPipeline(
         self.vae.decoder.conv_in.to(dtype)
         self.vae.decoder.mid_block.to(dtype)
 
+    # Copied from diffusers.pipelines.t2i_adapter.pipeline_stable_diffusion_adapter.StableDiffusionAdapterPipeline._default_height_width
+    def _default_height_width(self, height, width, image):
+        # NOTE: It is possible that a list of images have different
+        # dimensions for each image, so just checking the first image
+        # is not _exactly_ correct, but it is simple.
+        while isinstance(image, list):
+            image = image[0]
+
+        if height is None:
+            if isinstance(image, PIL.Image.Image):
+                height = image.height
+            elif isinstance(image, torch.Tensor):
+                height = image.shape[-2]
+
+            # round down to nearest multiple of `self.adapter.downscale_factor`
+            height = (height // self.adapter.downscale_factor) * self.adapter.downscale_factor
+
+        if width is None:
+            if isinstance(image, PIL.Image.Image):
+                width = image.width
+            elif isinstance(image, torch.Tensor):
+                width = image.shape[-1]
+
+            # round down to nearest multiple of `self.adapter.downscale_factor`
+            width = (width // self.adapter.downscale_factor) * self.adapter.downscale_factor
+
+        return height, width
+
+
     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
     def get_guidance_scale_embedding(
         self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
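
For reference, a minimal standalone sketch of the defaulting logic this commit adds. The function name default_height_width and the downscale_factor=16 value are illustrative assumptions, not taken from the commit; the pipeline method reads the factor from self.adapter.downscale_factor instead.

import PIL.Image
import torch

def default_height_width(height, width, image, downscale_factor=16):
    # Standalone sketch of the pipeline method above; downscale_factor=16
    # is an assumed illustrative value, not read from an adapter.
    # Unwrap nested lists; only the first image is inspected, as in the diff.
    while isinstance(image, list):
        image = image[0]

    if height is None:
        if isinstance(image, PIL.Image.Image):
            height = image.height
        elif isinstance(image, torch.Tensor):
            height = image.shape[-2]
        # Round down to the nearest multiple of the downscale factor.
        height = (height // downscale_factor) * downscale_factor

    if width is None:
        if isinstance(image, PIL.Image.Image):
            width = image.width
        elif isinstance(image, torch.Tensor):
            width = image.shape[-1]
        width = (width // downscale_factor) * downscale_factor

    return height, width

# A 769x513 PIL image (width x height) rounds down to (512, 768):
print(default_height_width(None, None, PIL.Image.new("RGB", (769, 513))))

Explicitly passed height/width values are returned untouched, so callers can still override the image-derived defaults.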