Create app.py
app.py ADDED
@@ -0,0 +1,21 @@
+from diffusers import KandinskyV22CombinedPipeline
+import gradio as gr
+from accelerate import Accelerator
+import torch
+from transformers import pipeline
+from PIL import Image
+
+accelerator = Accelerator()
+pipe = accelerator.prepare(KandinskyV22CombinedPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float32))
+pipe.unet.to(memory_format=torch.channels_last)  # channels-last memory layout generally speeds up CPU convolutions
+pipe.to("cpu")
+apol = []  # generated images accumulate here across calls, so the gallery shows the whole session history
+def plex(prompt, negative_prompt, stips, stups, uno):
+    image = pipe([prompt] * 2, [negative_prompt] * 2, num_inference_steps=stips, prior_num_inference_steps=stups, prior_guidance_scale=uno, height=512, width=512)
+    for i, igs in enumerate(image["images"]):
+        apol.append(igs)
+    return apol
+
+iface = gr.Interface(fn=plex, inputs=[gr.Textbox(label="prompt"), gr.Textbox(label="negative prompt", value="low quality, bad quality"), gr.Slider(label="inference_steps", minimum=1, step=1, maximum=10, value=5), gr.Slider(label="inference_steps_prior", minimum=1, step=1, maximum=10, value=5), gr.Slider(label="prior_guidance_scale", minimum=0.1, step=0.1, maximum=1.0, value=0.5)], outputs=gr.Gallery(columns=2), title="Txt2Img_KndskyV22_Cmbnd by JoPmt", description="Running on CPU, very slow!")
+iface.queue(max_size=1)  # hold at most one pending request at a time on the shared CPU
+iface.launch(max_threads=1)
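
A minimal smoke test of the generation function outside the Gradio UI, as a sketch: it assumes the definitions above have already been executed (for example in a Python REPL) and the model weights have finished downloading; the prompt text and output filename are placeholders.

    # generate two 512x512 images using the same defaults the sliders expose
    imgs = plex("a watercolor painting of a lighthouse at dusk",
                "low quality, bad quality",
                stips=5, stups=5, uno=0.5)
    imgs[0].save("sample.png")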