# Image-engine / main.py
import os
from fastapi import FastAPI, File, Form, HTTPException, UploadFile
from fastapi.responses import StreamingResponse
import torch
from diffusers import StableDiffusionPipeline, StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler, DPMSolverSinglestepScheduler
from diffusers.pipelines import StableDiffusionInpaintPipeline, StableDiffusionXLInpaintPipeline
from huggingface_hub import hf_hub_download
import numpy as np
import random
from PIL import Image
import io
app = FastAPI()
MAX_SEED = np.iinfo(np.int32).max
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
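# Note: the fp16 checkpoints loaded below effectively assume a CUDA device;
# float16 inference on CPU is unsupported or impractically slow for these
# pipelines.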
# Load HF token from environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
# Load pipelines
pipe_xl_final = StableDiffusionXLPipeline.from_single_file(
    hf_hub_download(repo_id="fluently/Fluently-XL-Final", filename="FluentlyXL-Final.safetensors", token=HF_TOKEN),
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_xl_final.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl_final.scheduler.config)
pipe_xl_final.to(device)
pipe_anime = StableDiffusionPipeline.from_pretrained(
    "fluently/Fluently-anime",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_anime.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_anime.scheduler.config)
pipe_anime.to(device)
pipe_epic = StableDiffusionPipeline.from_pretrained(
    "fluently/Fluently-epic",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_epic.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_epic.scheduler.config)
pipe_epic.to(device)
pipe_xl_inpaint = StableDiffusionXLInpaintPipeline.from_single_file(
    "https://huggingface.co/fluently/Fluently-XL-v3-inpainting/blob/main/FluentlyXL-v3-inpainting.safetensors",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_xl_inpaint.to(device)
pipe_inpaint = StableDiffusionInpaintPipeline.from_pretrained(
    "fluently/Fluently-v4-inpainting",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_inpaint.to(device)
pipe_xl = StableDiffusionXLPipeline.from_pretrained(
    "fluently/Fluently-XL-v4",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_xl.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl.scheduler.config)
pipe_xl.to(device)
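# Lightning-style checkpoints are distilled for few-step sampling, so this
# pipeline gets a single-step DPM-Solver scheduler with "trailing" timestep
# spacing and is invoked below with only 5 steps and a reduced guidance scale.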
pipe_xl_lightning = StableDiffusionXLPipeline.from_pretrained(
    "fluently/Fluently-XL-v3-lightning",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe_xl_lightning.scheduler = DPMSolverSinglestepScheduler.from_config(
    pipe_xl_lightning.scheduler.config,
    use_karras_sigmas=False,
    timestep_spacing="trailing",
    lower_order_final=True,
)
pipe_xl_lightning.to(device)
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
@app.post("/generate")
async def generate(
    model: str = Form(...),
    prompt: str = Form(...),
    negative_prompt: str = Form(""),
    use_negative_prompt: bool = Form(False),
    seed: int = Form(0),
    width: int = Form(1024),
    height: int = Form(1024),
    guidance_scale: float = Form(3),
    randomize_seed: bool = Form(False),
    inpaint_image: UploadFile = File(None),
    mask_image: UploadFile = File(None),
    blur_factor: float = Form(1.0),
    strength: float = Form(0.75),
):
    seed = int(randomize_seed_fn(seed, randomize_seed))
    # Seed a torch.Generator so the (possibly randomized) seed actually
    # controls sampling.
    generator = torch.Generator(device=device).manual_seed(seed)
    if not use_negative_prompt:
        negative_prompt = ""
    inpaint_image_pil = Image.open(io.BytesIO(await inpaint_image.read())) if inpaint_image else None
    mask_image_pil = Image.open(io.BytesIO(await mask_image.read())) if mask_image else None
    if model == "Fluently XL Final":
        images = pipe_xl_final(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=25,
            num_images_per_prompt=1,
            generator=generator,
            output_type="pil",
        ).images
    elif model == "Fluently Anime":
        images = pipe_anime(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=30,
            num_images_per_prompt=1,
            generator=generator,
            output_type="pil",
        ).images
    elif model == "Fluently Epic":
        images = pipe_epic(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=30,
            num_images_per_prompt=1,
            generator=generator,
            output_type="pil",
        ).images
    elif model == "Fluently XL v4":
        images = pipe_xl(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=25,
            num_images_per_prompt=1,
            generator=generator,
            output_type="pil",
        ).images
    elif model == "Fluently XL v3 Lightning":
        images = pipe_xl_lightning(
            prompt=prompt,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=2,
            num_inference_steps=5,
            num_images_per_prompt=1,
            generator=generator,
            output_type="pil",
        ).images
    elif model in ("Fluently v4 inpaint", "Fluently XL v3 inpaint"):
        if inpaint_image_pil is None or mask_image_pil is None:
            raise HTTPException(status_code=400, detail="Inpainting requires both inpaint_image and mask_image.")
        # Select the pipeline that matches the requested checkpoint: the XL
        # inpaint model must run on the XL inpaint pipeline.
        pipe = pipe_xl_inpaint if model == "Fluently XL v3 inpaint" else pipe_inpaint
        blurred_mask = pipe.mask_processor.blur(mask_image_pil, blur_factor=blur_factor)
        images = pipe(
            prompt=prompt,
            image=inpaint_image_pil,
            mask_image=blurred_mask,
            negative_prompt=negative_prompt,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            num_inference_steps=30,
            strength=strength,
            num_images_per_prompt=1,
            generator=generator,
            output_type="pil",
        ).images
    else:
        raise HTTPException(status_code=400, detail=f"Unknown model: {model}")

    img = images[0]
    img_byte_arr = io.BytesIO()
    img.save(img_byte_arr, format="PNG")
    img_byte_arr.seek(0)
    return StreamingResponse(img_byte_arr, media_type="image/png")
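# Example inpainting request (a minimal sketch; host, port, and file paths
# are placeholders, and the `requests` package is assumed). Inpainting
# models need both images attached as multipart/form-data:
#
#   import requests
#   with open("input.png", "rb") as img, open("mask.png", "rb") as mask:
#       resp = requests.post(
#           "http://localhost:7860/generate",
#           data={"model": "Fluently XL v3 inpaint", "prompt": "a red sports car"},
#           files={"inpaint_image": img, "mask_image": mask},
#       )
#   with open("output.png", "wb") as f:
#       f.write(resp.content)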
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
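# Example text-to-image request (a minimal sketch; host and port match the
# uvicorn settings above, and the `requests` package is assumed):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/generate",
#       data={"model": "Fluently XL v4", "prompt": "a lighthouse at dusk"},
#   )
#   with open("out.png", "wb") as f:
#       f.write(resp.content)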