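"""Gradio Space for the pwc-india/tartan_weights Stable Diffusion checkpoint,
exposing a Text2Image tab and an Image2Image tab."""
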
import gradio as gr
import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline

# Run on CPU. Half precision is only used on GPU, since fp16 inference is not
# supported by most CPU ops and would crash the pipeline.
device = "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Fine-tuned Stable Diffusion checkpoint shared by both pipelines.
model_id = "pwc-india/tartan_weights"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype).to(device)
# Reuse the already-loaded components (UNet, VAE, text encoder, scheduler) for img2img.
pipe2 = StableDiffusionImg2ImgPipeline(**pipe.components).to(device)
# Text-to-image: slider values are cast to int where the pipeline expects integers.
def generate_txt2img(inp_txt, inp_neg, num_inf_steps, width, height, g_scale, num_imgs):
    return pipe(prompt=inp_txt, negative_prompt=inp_neg, num_inference_steps=int(num_inf_steps), width=int(width), height=int(height), guidance_scale=g_scale, num_images_per_prompt=int(num_imgs)).images
# Image-to-image: the uploaded image arrives as a numpy array and is resized to
# 512x512 before being passed to the pipeline.
def generate_img2img(inp_img, inp_txt, inp_neg, num_inf_steps, g_scale, num_imgs, strength):
    image = Image.fromarray(inp_img)
    image = image.resize((512, 512))
    return pipe2(prompt=inp_txt, negative_prompt=inp_neg, num_inference_steps=int(num_inf_steps), image=image, strength=strength, guidance_scale=g_scale, num_images_per_prompt=int(num_imgs)).images
with gr.Blocks() as demo:
    with gr.Tab("Text2Image"):
        with gr.Group():
            inp_txt = gr.Text(show_label=False, placeholder="Enter your prompt here...")
            inp_neg = gr.Text(show_label=False, placeholder="Enter your negative prompt here...")
        with gr.Accordion("Extra parameters", open=False):
            num_inf_steps = gr.Slider(label="Number of inference steps", minimum=20, maximum=100, value=50, step=1)
            with gr.Row():
                with gr.Column():
                    # Stable Diffusion expects dimensions that are multiples of 8.
                    width = gr.Slider(label="Width (pixels)", minimum=256, maximum=1024, value=512, step=8)
                with gr.Column():
                    height = gr.Slider(label="Height (pixels)", minimum=256, maximum=1024, value=512, step=8)
            g_scale = gr.Slider(label="Guidance scale", minimum=1, maximum=10, value=7.5, step=0.5)
            num_imgs = gr.Slider(label="Number of images", minimum=1, maximum=10, value=1, step=1)
        btn = gr.Button("Generate")
        out_img = gr.Gallery(preview=True)
        btn.click(fn=generate_txt2img, inputs=[inp_txt, inp_neg, num_inf_steps, width, height, g_scale, num_imgs], outputs=[out_img])
with gr.Tab("Image2Image"):
with gr.Group():
inp_img = gr.Image()
inp_txt2 = gr.Text(show_label=False, placeholder="Enter your prompt here...")
inp_neg2 = gr.Text(show_label=False, placeholder="Enter your negative prompt here...")
with gr.Accordion("Extra parameters", open=False):
num_inf_steps2 = gr.Slider(label="Number of inference steps", minimum=20, maximum=100, value=50, step=1)
g_scale2 = gr.Slider(label="Guidance scale", minimum=1, maximum=10, value=7.5, step=0.5)
num_imgs2 = gr.Slider(label="Number of images", minimum=1, maximum=10, value=1, step=1)
strength = gr.Slider(label="Strength", minimum=0, maximum=1, value=0.8, step=0.1)
btn2 = gr.Button("Generate")
out_img2 = gr.Gallery(preview=True)
btn2.click(fn=generate_img2img, inputs=[inp_img, inp_txt2, inp_neg2, num_inf_steps2, g_scale2, num_imgs2, strength], outputs=[out_img2])
demo.launch(debug=True)
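
# Note: on a GPU-backed Space you could plausibly switch to half precision and
# enable request queuing; an untested sketch, assuming a CUDA device is available:
#   device = "cuda"
#   pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
#   demo.queue().launch(debug=True)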