import requests
import torch
from PIL import Image
from io import BytesIO

from diffusers import DiffusionPipeline

# Load the Stable unCLIP image-variation pipeline in half precision
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16, variant="fp16"
)
pipe.to("cuda")

# Download the conditioning image
url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/image%20(10).png"
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
# init_image = init_image.resize((768, 512))  # optional resize

prompt = "A fantasy landscape, trending on artstation"

# Generate four variations of the input image, guided by the prompt
# (the prompt is repeated so its batch size matches the image batch)
images = pipe(4 * [init_image], prompt=4 * [prompt]).images

for i, image in enumerate(images):
    image.save(f"fantasy_landscape_{i}.png")