# flux4 / app.py
# NOTE: the original paste included Hugging Face file-viewer chrome
# ("history blame", byte count, etc.); it was not valid Python and has
# been reduced to this comment header.
from pathlib import Path
from PIL import Image
import streamlit as st
from huggingface_hub import InferenceClient, AsyncInferenceClient
import asyncio
import os
import random
import numpy as np
import yaml
import requests
# Hugging Face API token, injected via the environment (HF Spaces secret).
HUGGINGFACE_API = os.environ.get("HF_TOKEN")
try:
    with open("config.yaml", "r") as file:
        # safe_load returns None for an empty file; normalize to a dict.
        credentials = yaml.safe_load(file) or {}
except Exception as e:
    st.error(f"Error al cargar el archivo de configuración: {e}")
    credentials = {}
# Guarantee both keys exist so authenticate_user() never raises KeyError
# when config.yaml is missing or incomplete.
credentials.setdefault("username", "")
credentials.setdefault("password", "")
# Largest value usable as a 32-bit RNG seed.
MAX_SEED = np.iinfo(np.int32).max
client = AsyncInferenceClient()
llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# Local folder where generated images and their prompt log are persisted.
DATA_PATH = Path("./data")
DATA_PATH.mkdir(exist_ok=True)
def authenticate_user(username, password):
    """Return True iff the supplied pair matches the credentials from config.yaml."""
    expected = (credentials["username"], credentials["password"])
    return (username, password) == expected
async def gen(prompts, width, height, model_name, num_variants, prompt_checkbox, lora=None):
    """POST a generation request to the HF Inference API and return the decoded JSON.

    Args:
        prompts: prompt payload sent as the ``inputs`` field.
        width, height: output dimensions in pixels.
        model_name: Hub repo id appended to the inference URL.
        num_variants, prompt_checkbox: accepted for interface compatibility;
            not used here (variant expansion happens in the caller).
        lora: optional LoRA id forwarded in the request parameters.

    Raises:
        Exception: if the API responds with a non-200 status.
    """
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API}"}
    payload = {
        "inputs": prompts,
        "parameters": {
            "width": width,
            "height": height,
            "num_inference_steps": 50,
            "guidance_scale": 7.5
        }
    }
    if lora:
        payload["parameters"]["lora"] = lora
    url = f"https://api-inference.huggingface.co/models/{model_name}"
    # Bug fix: requests.post is blocking and previously ran directly inside
    # this coroutine, stalling the event loop for the whole generation.
    # Run it in a worker thread instead.
    response = await asyncio.to_thread(requests.post, url, headers=headers, json=payload)
    if response.status_code != 200:
        raise Exception(f"Error: {response.status_code}, {response.text}")
    # NOTE(review): text-to-image endpoints often return raw image bytes,
    # not JSON — confirm the expected content type for these models.
    return response.json()
def list_saved_images():
    """Return the gallery's *.jpg paths, newest-first by modification time."""
    saved = list(DATA_PATH.glob("*.jpg"))
    saved.sort(key=lambda p: p.stat().st_mtime, reverse=True)
    return saved
def display_gallery():
    """Render the saved-image gallery in an 8-column grid with delete buttons."""
    st.header("Galería de Imágenes Guardadas")
    images = list_saved_images()
    if not images:
        st.info("No hay imágenes guardadas.")
        return
    cols = st.columns(8)
    for i, image_file in enumerate(images):
        with cols[i % 8]:
            st.image(str(image_file), caption=image_file.name, use_column_width=True)
            prompt = get_prompt_for_image(image_file.name)
            st.write(prompt[:300])
            if st.button(f"Borrar", key=f"delete_{i}_{image_file.name}"):
                os.remove(image_file)
                st.success("Imagen borrada")
                # Bug fix: the original recursively called display_gallery()
                # here, nesting a second gallery inside this column and
                # risking duplicate widget keys. Restart the script so the
                # grid is rebuilt without the deleted file.
                st.rerun()
def save_prompt(prompt):
    """Append one prompt line to the gallery's prompt log (prompts.txt)."""
    log_path = DATA_PATH / "prompts.txt"
    with log_path.open("a") as log:
        log.write(f"{prompt}\n")
def run_async(coro_fn, *call_args):
    """Synchronously drive the coroutine function *coro_fn* with *call_args*.

    Spins up a fresh event loop via asyncio.run and returns the result.
    """
    coroutine = coro_fn(*call_args)
    return asyncio.run(coroutine)
async def improve_prompt(prompt):
    """Ask the Mixtral LLM to rewrite *prompt* as a richer txt2img prompt.

    A random instruction template is appended so repeated calls yield varied
    rewrites. On any failure a Spanish error string is returned instead of
    raising, so callers can display it directly.
    """
    try:
        instructions = [
            "With my idea create a vibrant description for a detailed txt2img prompt, 300 characters max.",
            "With my idea write a creative and detailed text-to-image prompt in English, 300 characters max.",
            "With my idea generate a descriptive and visual txt2img prompt in English, 300 characters max.",
            "With my idea describe a photorealistic with illumination txt2img prompt in English, 300 characters max.",
            "With my idea give a realistic and elegant txt2img prompt in English, 300 characters max.",
            "With my idea conform a visually dynamic and surreal txt2img prompt in English, 300 characters max.",
            "With my idea realize an artistic and cinematic txt2img prompt in English, 300 characters max.",
            "With my idea make a narrative and immersive txt2img prompt in English, 300 characters max."
        ]
        instruction = random.choice(instructions)
        formatted_prompt = f"{prompt}: {instruction}"
        response = llm_client.text_generation(formatted_prompt, max_new_tokens=100)
        # Bug fix: the original wrote `'generated_text' in response`, which on
        # a plain string is a SUBSTRING test, and then indexed the string with
        # a key (TypeError). Distinguish the dict and str shapes explicitly.
        if isinstance(response, dict) and 'generated_text' in response:
            return response['generated_text'][:100]
        return response.strip()
    except Exception as e:
        return f"Error mejorando el prompt: {e}"
def save_image(image, file_name, prompt=None):
    """Persist *image* as a JPEG under DATA_PATH, optionally logging its prompt.

    Returns the saved Path, or None (with a UI warning) if a file with that
    name already exists in the gallery.
    """
    image_path = DATA_PATH / file_name
    if image_path.exists():
        st.warning(f"La imagen '{file_name}' ya existe en la galería. No se guardó.")
        return None
    else:
        # Robustness: JPEG cannot encode an alpha channel, so RGBA/P images
        # would make image.save() raise OSError; convert them first.
        if image.mode not in ("RGB", "L"):
            image = image.convert("RGB")
        image.save(image_path, format="JPEG")
        if prompt:
            save_prompt(f"{file_name}: {prompt}")
        return image_path
async def generate_image(prompt, width, height, seed, model_name):
    """Generate a single image with the async HF client.

    A seed of -1 means "random": a concrete seed is drawn so the caller can
    log and reproduce it. Returns a (PIL.Image, seed) tuple.
    """
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    # Bug fix: the resolved seed was previously never forwarded to the API,
    # so results were not reproducible even with a fixed seed.
    image = await client.text_to_image(
        prompt=prompt, height=height, width=width, model=model_name, seed=seed
    )
    return image, seed
def get_prompt_for_image(image_name):
    """Look up the prompt logged for *image_name* in prompts.txt.

    Returns the prompt of the LAST matching line (latest save wins), or the
    Spanish "no prompt" placeholder when the file or entry is missing.
    """
    # Bug fix: matching with startswith(image_name) alone also matched longer
    # filenames sharing the prefix (e.g. "a.jpg" matched "a.jpg2.jpg: ...");
    # require the exact "name: " prefix that save_prompt writes.
    prefix = f"{image_name}: "
    found = "No hay prompt asociado."
    try:
        with open(DATA_PATH / "prompts.txt", "r") as f:
            for line in f:
                if line.startswith(prefix):
                    found = line[len(prefix):].strip()
    except FileNotFoundError:
        return "No hay prompt asociado."
    return found
def login_form():
    """Render the login widgets and set st.session_state['authenticated'] on success."""
    st.title("Iniciar Sesión")
    # NOTE(review): default credentials are pre-filled in the UI — confirm
    # this is intentional for a public Space.
    user = st.text_input("Usuario", value="admin")
    pwd = st.text_input("Contraseña", value="flux3x", type="password")
    if not st.button("Iniciar Sesión"):
        return
    if authenticate_user(user, pwd):
        st.success("Autenticación exitosa.")
        st.session_state['authenticated'] = True
    else:
        st.error("Credenciales incorrectas. Intenta de nuevo.")
async def generate_variations(prompt, num_variants, use_enhanced):
    """Build a list of distinct prompts for generation.

    With use_enhanced, asks improve_prompt() repeatedly until num_variants
    distinct rewrites are collected (bounded retries). Otherwise returns the
    original prompt as a single-element list.
    """
    # Bug fix: the original looped `while len(prompts) < num_variants` even
    # when use_enhanced was False, re-adding the SAME prompt to a set — an
    # infinite loop for num_variants > 1. Short-circuit that path.
    if not use_enhanced:
        return [prompt]
    prompts = set()
    # Bound the retries so duplicate LLM outputs cannot hang the app either.
    for _ in range(num_variants * 10):
        if len(prompts) >= num_variants:
            break
        prompts.add(await improve_prompt(prompt))
    return list(prompts)
async def main():
    """Streamlit entry point: login gate, sidebar controls, generation, gallery."""
    st.set_page_config(layout="wide")
    # Require a successful login before showing the generator UI.
    if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
        login_form()
        return
    st.title("Flux + Multiple Images")
    prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)
    style_option = st.sidebar.selectbox("Selecciona un estilo",
                                        ["realistic", "photorealistic", "illustration",
                                         "cartoon", "comic", "imaginative", "abstract"])
    prompt_with_style = f"{prompt}, {style_option} style"
    # Bug fix: the option list previously contained the identical LoRA twice.
    lora_option = st.sidebar.selectbox("Selecciona un LoRA",
                                       ["XLabs-AI/flux-RealismLora"])
    format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9", "1:1"])
    prompt_checkbox = st.sidebar.checkbox("Prompt Enhancer")
    model_option = st.sidebar.selectbox("Modelo",
                                        ["black-forest-labs/FLUX.1-schnell",
                                         "black-forest-labs/FLUX.1-dev",
                                         "enhanceaiteam/Flux-Uncensored-V2",
                                         "enhanceaiteam/Flux-Uncensored"])
    # Map the aspect-ratio choice to concrete pixel dimensions.
    width, height = (360, 640) if format_option == "9:16" else (640, 360) if format_option == "16:9" else (640, 640)
    if prompt_checkbox:
        num_variants = st.sidebar.slider("Número de imágenes a generar", 1, 8, 1)
    else:
        num_variants = 1
    model_name = model_option
    if prompt_checkbox:
        with st.spinner("Generando prompts mejorados..."):
            prompts = await generate_variations(prompt_with_style, num_variants, True)
    else:
        prompts = [prompt_with_style]
    if st.sidebar.button("Generar Imágenes"):
        with st.spinner("Generando imágenes..."):
            try:
                results = await gen(prompts, width, height, model_name, num_variants, prompt_checkbox, lora_option)
                st.session_state['generated_image_paths'] = results
                # NOTE(review): results is whatever gen() decoded from the
                # API; st.image() accepts URLs/bytes/arrays — confirm the
                # response shape matches.
                for result in results:
                    st.image(result, caption="Imagen Generada")
            except Exception as e:
                st.error(f"Error al generar las imágenes: {str(e)}")
    display_gallery()
if __name__ == "__main__":
    # Drive the async Streamlit app with a fresh event loop.
    asyncio.run(main())