import gradio as gr
import torch
from diffusers import FluxPipeline
import spaces
import random
"""
This application uses the Flux.1 Lite model:
@article{flux1-lite,
title={Flux.1 Lite: Distilling Flux1.dev for Efficient Text-to-Image Generation},
author={Daniel Verdú, Javier Martín},
email={[email protected], [email protected]},
year={2024},
}
"""
@spaces.GPU(duration=70)
def initialize_model():
    # Load the Flux.1 Lite pipeline in bfloat16 and move it to the GPU
    model_id = "Freepik/flux.1-lite-8B-alpha"
    pipe = FluxPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16
    ).to("cuda")
    return pipe
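
# Design note: the pipeline is reloaded on every call so that all CUDA work happens
# inside a @spaces.GPU allocation; a common alternative on ZeroGPU is to load the
# pipeline once at import time and only move it to "cuda" inside the decorated call.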

@spaces.GPU(duration=70)
def generate_image(
    prompt,
    guidance_scale=3.5,
    width=1024,
    height=1024
):
    try:
        # Initialize model within the GPU context
        pipe = initialize_model()

        # Generate random seed
        seed = random.randint(1, 1000000)

        with torch.inference_mode():
            image = pipe(
                prompt=prompt,
                generator=torch.Generator(device="cuda").manual_seed(seed),
                num_inference_steps=25,  # Fixed steps
                guidance_scale=guidance_scale,
                height=height,
                width=width,
            ).images[0]

        return image
    except Exception as e:
        print(f"Error during image generation: {str(e)}")
        raise

# Create the Gradio interface
demo = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(
            label="Prompt",
            placeholder="Enter your image description here...",
            value="a glass cup with beer, inside the beer a scuba diver, with a beautiful sunset background"
        ),
        gr.Slider(
            minimum=1,
            maximum=20,
            value=3.5,
            label="Guidance Scale",
            step=0.5
        ),
        gr.Slider(
            minimum=128,
            maximum=1024,
            value=1024,
            label="Width",
            step=64
        ),
        gr.Slider(
            minimum=128,
            maximum=1024,
            value=1024,
            label="Height",
            step=64
        )
    ],
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="Freepik Flux.1-lite-8B-alpha Model (Zero-GPU)",
    description="Generate images using Freepik's Flux.1 Lite model with Zero-GPU allocation. Each generation uses 25 fixed inference steps and a random seed.",
    examples=[
        ["A close-up image of a green alien with fluorescent skin in the middle of a dark purple forest", 3.5, 1024, 1024],
        ["a glass cup with beer, inside the beer a scuba diver, with a beautiful sunset background", 3.5, 1024, 1024]
    ]
)

# Launch the app
if __name__ == "__main__":
    demo.launch()
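
# Running locally (assuming this file is the Space's app entry point, typically app.py):
#   python app.py
# then open the local URL that Gradio prints. On a ZeroGPU Space the app starts
# automatically, and GPU time is only allocated while a decorated call is running.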