import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline

# Allocate a GPU for the duration of each call when running on ZeroGPU Spaces;
# this decorator is what the `spaces` import is used for.
@spaces.GPU
def inference(
    model_id: str,
    prompt: str,
    negative_prompt: str = "",
    # track_tqdm forwards the pipeline's internal tqdm bars to the Gradio progress bar.
    progress=gr.Progress(track_tqdm=True),
):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # The pipeline is reloaded on every call because the model ID is a user input.
    pipe = DiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
    ).to(device)
    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
    ).images[0]
    return image

if __name__ == "__main__":
    demo = gr.Interface(
        fn=inference,
        inputs=[
            gr.Text(
                label="Model ID",
                value="stabilityai/stable-diffusion-3-medium-diffusers",
            ),
            gr.Text(label="Prompt", value=""),
            gr.Text(label="Negative Prompt", value=""),
        ],
        outputs=[
            gr.Image(label="Image", type="pil"),
        ],
    )
    demo.launch()
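Once the app is deployed as a Space, it can also be called programmatically with `gradio_client`. Below is a minimal sketch, assuming a hypothetical Space repo id `your-username/sd3-demo` (not part of the listing above); the `gr.Interface` exposes its default endpoint as `/predict`.

```python
from gradio_client import Client

# Hypothetical Space name; replace with the repo id of the deployed demo.
client = Client("your-username/sd3-demo")

# Positional arguments follow the order of the Interface inputs:
# Model ID, Prompt, Negative Prompt.
result = client.predict(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    "a photo of an astronaut riding a horse",
    "",
    api_name="/predict",
)
print(result)  # local path to the generated image file
```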