import gradio as gr from PIL import Image from io import BytesIO import torch import os #os.system("pip install git+https://github.com/fffiloni/diffusers") from diffusers import DiffusionPipeline, DDIMScheduler from imagic import ImagicStableDiffusionPipeline has_cuda = torch.cuda.is_available() device = "cuda" pipe = ImagicStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, #custom_pipeline=ImagicStableDiffusionPipeline, scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False) ).to(device) generator = torch.Generator("cuda").manual_seed(0) def train(prompt, init_image, trn_text, trn_steps): init_image = Image.open(init_image).convert("RGB") init_image = init_image.resize((256, 256)) res = pipe.train( prompt, init_image, guidance_scale=7.5, num_inference_steps=50, generator=generator, text_embedding_optimization_steps=trn_text, model_fine_tuning_optimization_steps=trn_steps) with torch.no_grad(): torch.cuda.empty_cache() return "Training is finished !", gr.update(value=0), gr.update(value=0) def generate(prompt, init_image, trn_text, trn_steps): init_image = Image.open(init_image).convert("RGB") init_image = init_image.resize((256, 256)) res = pipe.train( prompt, init_image, guidance_scale=7.5, num_inference_steps=50, generator=generator, text_embedding_optimization_steps=trn_text, model_fine_tuning_optimization_steps=trn_steps) with torch.no_grad(): torch.cuda.empty_cache() res = pipe(alpha=1) return res.images[0] title = """
Text-Based Real Image Editing with Diffusion Models
This pipeline aims to bring the method from this paper to Stable Diffusion, allowing for real-world image editing.
You can skip the queue by duplicating this space or by running the Colab version: