osanseviero multimodalart HF staff committed on
Commit
f35400c
1 Parent(s): 3c7cefd

New direct-to-PIL code (#2)


- New direct-to-PIL code (6696bcdd3e58a49ad6462d867e80b0d9c4d04098)


Co-authored-by: Multimodal AI art <[email protected]>

Files changed (1)
  1. app.py +2 -5
app.py CHANGED
@@ -9,11 +9,8 @@ pipeline = LatentDiffusionUncondPipeline.from_pretrained("CompVis/ldm-celebahq-2
 
 def predict(steps=1, seed=42):
     generator = torch.manual_seed(seed)
-    image = pipeline(generator=generator, num_inference_steps=steps)["sample"]
-    image_processed = image.cpu().permute(0, 2, 3, 1)
-    image_processed = (image_processed + 1.0) * 127.5
-    image_processed = image_processed.clamp(0, 255).numpy().astype(np.uint8)
-    return PIL.Image.fromarray(image_processed[0])
+    images = pipeline(generator=generator, num_inference_steps=steps)["sample"]
+    return images[0]
 
 random_seed = random.randint(0, 2147483647)
 gr.Interface(
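
For context, here is a minimal sketch of app.py after this commit. It assumes the model id truncated in the hunk header is the full "CompVis/ldm-celebahq-256" checkpoint, that LatentDiffusionUncondPipeline is importable from diffusers (as in the early diffusers releases this Space targets), and that the gr.Interface(...) wiring is illustrative only, since the real arguments fall outside the diff hunk.

import random

import gradio as gr
import torch
from diffusers import LatentDiffusionUncondPipeline  # assumed import path

# Full model id assumed; the diff hunk header truncates it.
pipeline = LatentDiffusionUncondPipeline.from_pretrained("CompVis/ldm-celebahq-256")


def predict(steps=1, seed=42):
    # Fix the RNG so the same (steps, seed) pair reproduces the same sample.
    generator = torch.manual_seed(seed)
    # The pipeline now returns PIL images in "sample" directly, so the old
    # permute / rescale / clamp / uint8 / PIL.Image.fromarray post-processing is gone.
    images = pipeline(generator=generator, num_inference_steps=steps)["sample"]
    return images[0]


random_seed = random.randint(0, 2147483647)

# Hypothetical interface wiring; the actual gr.Interface(...) call is not shown in the diff.
# precision=0 makes the seed arrive as an int before it reaches torch.manual_seed.
gr.Interface(
    fn=predict,
    inputs=[
        gr.Slider(1, 100, value=1, step=1, label="Inference steps"),
        gr.Number(value=random_seed, label="Seed", precision=0),
    ],
    outputs="image",
).launch()

The only behavioral change in the diff is that predict now hands the first returned PIL image straight to Gradio instead of converting a tensor to uint8 and building the image by hand.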