from diffusers import KandinskyV22CombinedPipeline
import gradio as gr
from accelerate import Accelerator
import torch, random

# Load the combined Kandinsky 2.2 text-to-image pipeline (prior + decoder) and run it on CPU.
accelerator = Accelerator()
pipe = accelerator.prepare(KandinskyV22CombinedPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float32, use_safetensors=True))
pipe = pipe.to("cpu")

def plex(prompt, negative_prompt, stips, uno):
    # Generate two 512x512 images per request, each run seeded with a fresh random seed.
    apol = []
    generator = torch.Generator(device="cpu").manual_seed(random.randint(1, 4876364))
    images = pipe(prompt=[prompt]*2, negative_prompt=[negative_prompt]*2, num_inference_steps=stips, prior_guidance_scale=uno, height=512, width=512, generator=generator).images
    for igs in images:
        apol.append(igs)
    return apol

iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Textbox(label="prompt"),
        gr.Textbox(label="negative prompt", value="low quality, bad quality"),
        gr.Slider(label="inference_steps", minimum=1, step=1, maximum=10, value=10),
        gr.Slider(label="prior_guidance_scale", minimum=0.1, step=0.1, maximum=1.0, value=0.5),
    ],
    outputs=gr.Gallery(columns=2),
    title="Txt2Img_KndskyV22_Cmbnd by JoPmt",
    description="Running on CPU, very slow!",
)
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=1)