File size: 8,007 Bytes
644a3af
78936f3
644a3af
 
 
 
b2d8766
644a3af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f7786aa
644a3af
b2d8766
644a3af
 
 
b2d8766
644a3af
 
b2d8766
644a3af
1922d6a
644a3af
 
 
 
 
 
 
 
 
 
 
 
 
 
1922d6a
644a3af
b2d8766
644a3af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78936f3
644a3af
78936f3
644a3af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
from pathlib import Path
from PIL import Image
import streamlit as st
from huggingface_hub import InferenceClient, AsyncInferenceClient
import asyncio
import os
import random
import numpy as np
import yaml
import requests

# Hugging Face API token read from the environment (None when unset).
HUGGINGFACE_API = os.environ.get("HF_TOKEN")

# Load login credentials from config.yaml; on any failure fall back to empty
# credentials, which reject every login attempt.
try:
    with open("config.yaml", "r") as file:
        credentials = yaml.safe_load(file)
except Exception as e:
    st.error(f"Error al cargar el archivo de configuración: {e}")
    credentials = {"username": "", "password": ""}

# Largest value accepted as a random seed (int32 max).
MAX_SEED = np.iinfo(np.int32).max
# Async client used for text-to-image generation (model chosen per call).
client = AsyncInferenceClient()
# Synchronous LLM client used by improve_prompt for prompt enhancement.
llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# Local folder where generated images and their prompt log are persisted.
DATA_PATH = Path("./data")
DATA_PATH.mkdir(exist_ok=True)

def authenticate_user(username, password):
    """Return True when both fields match the credentials loaded from config.yaml."""
    expected_user = credentials["username"]
    expected_pass = credentials["password"]
    return (username, password) == (expected_user, expected_pass)

async def gen(prompts, width, height, model_name, num_variants, prompt_checkbox, lora=None):
    """Request image generation from the Hugging Face Inference API.

    Parameters
    ----------
    prompts : list[str] | str
        Prompt payload forwarded verbatim as the model ``inputs``.
    width, height : int
        Requested output dimensions in pixels.
    model_name : str
        Model repo id appended to the inference endpoint URL.
    num_variants, prompt_checkbox
        Accepted for caller compatibility; not used by the request itself.
    lora : str | None
        Optional LoRA identifier added to the generation parameters.

    Returns
    -------
    The decoded JSON response from the API.

    Raises
    ------
    Exception
        If the API answers with a non-200 status code.
    """
    headers = {"Authorization": f"Bearer {HUGGINGFACE_API}"}
    payload = {
        "inputs": prompts,
        "parameters": {
            "width": width,
            "height": height,
            "num_inference_steps": 50,
            "guidance_scale": 7.5
        }
    }
    if lora:
        payload["parameters"]["lora"] = lora
    url = f"https://api-inference.huggingface.co/models/{model_name}"

    # NOTE(review): this blocking call runs inside a coroutine and stalls the
    # event loop while waiting; consider asyncio.to_thread(...) if concurrency
    # ever matters here. The timeout prevents the app from hanging forever on
    # a stuck request (the previous code had none).
    response = requests.post(url, headers=headers, json=payload, timeout=120)
    if response.status_code != 200:
        raise Exception(f"Error: {response.status_code}, {response.text}")

    return response.json()
    
def list_saved_images():
    """Return the saved .jpg paths under DATA_PATH, newest first."""
    jpg_files = DATA_PATH.glob("*.jpg")
    return sorted(jpg_files, key=os.path.getmtime, reverse=True)

def display_gallery():
    """Render the saved-image gallery as an 8-column grid.

    Each cell shows the image, the first 300 characters of its stored prompt,
    and a delete button. Deletion relies on Streamlit's automatic script rerun
    after a button press to refresh the view — the previous recursive
    ``display_gallery()`` call rendered the gallery a second time inside
    itself, duplicating widgets and their keys.
    """
    st.header("Galería de Imágenes Guardadas")
    images = list_saved_images()
    if images:
        cols = st.columns(8)
        for i, image_file in enumerate(images):
            with cols[i % 8]:
                st.image(str(image_file), caption=image_file.name, use_column_width=True)
                prompt = get_prompt_for_image(image_file.name)
                st.write(prompt[:300])

                if st.button(f"Borrar", key=f"delete_{i}_{image_file.name}"):
                    os.remove(image_file)
                    st.success("Imagen borrada")
                    # No recursive re-render: the click itself triggers a full
                    # rerun, and the next pass no longer lists the deleted file.
    else:
        st.info("No hay imágenes guardadas.")

def save_prompt(prompt):
    """Append one prompt record to the shared prompts.txt log."""
    log_path = DATA_PATH / "prompts.txt"
    with open(log_path, "a") as handle:
        handle.write(f"{prompt}\n")

def run_async(func, *args):
    """Synchronously execute the coroutine function *func* and return its result."""
    coroutine = func(*args)
    return asyncio.run(coroutine)

async def improve_prompt(prompt):
    """Ask the Mixtral LLM for an enhanced txt2img prompt.

    A random instruction template is appended to the user's idea and sent to
    ``llm_client``. Returns the generated text, or an error message string if
    the call fails (callers treat the return value as a plain prompt either way).
    """
    try:
        instructions = [
            "With my idea create a vibrant description for a detailed txt2img prompt, 300 characters max.",
            "With my idea write a creative and detailed text-to-image prompt in English, 300 characters max.",
            "With my idea generate a descriptive and visual txt2img prompt in English, 300 characters max.",
            "With my idea describe a photorealistic with illumination txt2img prompt in English, 300 characters max.",
            "With my idea give a realistic and elegant txt2img prompt in English, 300 characters max.",
            "With my idea conform a visually dynamic and surreal txt2img prompt in English, 300 characters max.",
            "With my idea realize an artistic and cinematic txt2img prompt in English, 300 characters max.",
            "With my idea make a narrative and immersive txt2img prompt in English, 300 characters max."
        ]
        instruction = random.choice(instructions)
        formatted_prompt = f"{prompt}: {instruction}"
        # NOTE(review): blocking client call inside a coroutine; stalls the
        # event loop while the LLM responds.
        response = llm_client.text_generation(formatted_prompt, max_new_tokens=100)
        # text_generation normally returns a plain string. The previous check
        # `'generated_text' in response` was a *substring* test on that string
        # and essentially never matched; handle both shapes explicitly.
        if isinstance(response, dict) and 'generated_text' in response:
            return response['generated_text'][:100]
        return str(response).strip()
    except Exception as e:
        return f"Error mejorando el prompt: {e}"

def save_image(image, file_name, prompt=None):
    """Persist *image* as a JPEG under DATA_PATH unless the name is taken.

    Returns the saved path, or None when a file with that name already exists.
    When *prompt* is given, it is recorded in the prompt log next to the name.
    """
    destination = DATA_PATH / file_name
    if destination.exists():
        st.warning(f"La imagen '{file_name}' ya existe en la galería. No se guardó.")
        return None
    image.save(destination, format="JPEG")
    if prompt:
        save_prompt(f"{file_name}: {prompt}")
    return destination

async def generate_image(prompt, width, height, seed, model_name):
    """Generate one image with the async Hugging Face client.

    A *seed* of -1 requests a random seed in [0, MAX_SEED]. The resolved seed
    is now actually forwarded to the model — previously it was drawn but never
    passed to ``text_to_image``, so results were not reproducible.

    Returns a ``(image, seed_used)`` tuple.
    """
    if seed == -1:
        seed = random.randint(0, MAX_SEED)
    image = await client.text_to_image(
        prompt=prompt, height=height, width=width, model=model_name, seed=seed
    )
    return image, seed

def get_prompt_for_image(image_name):
    """Look up the stored prompt for *image_name* in the prompts.txt log.

    Scans every line; the last record whose line starts with the image name
    wins. Returns a fallback message when no entry (or no log file) exists.
    """
    fallback = "No hay prompt asociado."
    found = {}
    try:
        with open(DATA_PATH / "prompts.txt", "r") as handle:
            for record in handle:
                if record.startswith(image_name):
                    found[image_name] = record.split(": ", 1)[1].strip()
    except FileNotFoundError:
        return fallback
    return found.get(image_name, fallback)

def login_form():
    """Render the login form and mark the session authenticated on success.

    Security fix: the input widgets no longer ship with working credentials
    pre-filled — the old code defaulted the fields to the actual
    username/password, exposing them to anyone who loaded the page.
    """
    st.title("Iniciar Sesión")
    username = st.text_input("Usuario")
    password = st.text_input("Contraseña", type="password")
    if st.button("Iniciar Sesión"):
        if authenticate_user(username, password):
            st.success("Autenticación exitosa.")
            st.session_state['authenticated'] = True
        else:
            st.error("Credenciales incorrectas. Intenta de nuevo.")

async def generate_variations(prompt, num_variants, use_enhanced):
    """Return *num_variants* prompts derived from *prompt*.

    Bug fix: the old loop added prompts to a set until it held num_variants
    entries. With *use_enhanced* False the set could never grow past one
    element, so any num_variants > 1 hung the app forever; a repeating LLM
    response caused the same hang in the enhanced path. The non-enhanced path
    now just repeats the prompt, and the enhanced path bounds its retries.
    """
    if not use_enhanced:
        return [prompt] * num_variants

    variants = set()
    attempts = 0
    max_attempts = num_variants * 5  # bail out if the LLM keeps repeating itself
    while len(variants) < num_variants and attempts < max_attempts:
        variants.add(await improve_prompt(prompt))
        attempts += 1

    result = list(variants)
    # Pad with the original prompt if enough unique variants never appeared.
    while len(result) < num_variants:
        result.append(prompt)
    return result

async def main():
    """Streamlit entry point: login gate, sidebar controls, generation, gallery."""
    st.set_page_config(layout="wide")

    if not st.session_state.get('authenticated'):
        login_form()
        return

    st.title("Flux + Multiple Images")
    prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)

    style_option = st.sidebar.selectbox("Selecciona un estilo",
                                        ["realistic", "photorealistic", "illustration",
                                         "cartoon", "comic", "imaginative", "abstract"])

    prompt_with_style = f"{prompt}, {style_option} style"

    # Fix: the option list previously contained the same LoRA twice.
    lora_option = st.sidebar.selectbox("Selecciona un LoRA",
                                       ["XLabs-AI/flux-RealismLora"])

    format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9", "1:1"])
    prompt_checkbox = st.sidebar.checkbox("Prompt Enhancer")
    model_option = st.sidebar.selectbox("Modelo",
                                        ["black-forest-labs/FLUX.1-schnell",
                                         "black-forest-labs/FLUX.1-dev",
                                         "enhanceaiteam/Flux-Uncensored-V2",
                                         "enhanceaiteam/Flux-Uncensored"])

    width, height = (360, 640) if format_option == "9:16" else (640, 360) if format_option == "16:9" else (640, 640)

    if prompt_checkbox:
        num_variants = st.sidebar.slider("Número de imágenes a generar", 1, 8, 1)
    else:
        num_variants = 1

    if st.sidebar.button("Generar Imágenes"):
        # Fix: prompt enhancement used to run on *every* script rerun, issuing
        # paid LLM calls before the user even asked for images. It now runs
        # only when the generate button is clicked.
        if prompt_checkbox:
            with st.spinner("Generando prompts mejorados..."):
                prompts = await generate_variations(prompt_with_style, num_variants, True)
        else:
            prompts = [prompt_with_style]

        with st.spinner("Generando imágenes..."):
            try:
                results = await gen(prompts, width, height, model_option, num_variants, prompt_checkbox, lora_option)
                st.session_state['generated_image_paths'] = results
                for result in results:
                    st.image(result, caption="Imagen Generada")
            except Exception as e:
                st.error(f"Error al generar las imágenes: {str(e)}")

    display_gallery()

if __name__ == "__main__":
    # Drive the async Streamlit entry point with a fresh event loop.
    asyncio.run(main())