salomonsky committed on
Commit
b2d8766
1 Parent(s): 1922d6a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -36
app.py CHANGED
@@ -1,47 +1,175 @@
1
- import base64
2
- import io
3
- from huggingface_hub import InferenceClient
4
- import streamlit as st
5
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
- # Configuración
8
- hf_token = st.secrets["HF_TOKEN"]
9
- client = InferenceClient(repo_id="black-forest-labs/FLUX.1-schnell", token=hf_token)
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- def get_image_result(prompt, init_image=None):
12
- if init_image:
13
- img_buffer = io.BytesIO()
14
- init_image.save(img_buffer, format="PNG")
15
- img_bytes = img_buffer.getvalue()
16
- encoded_image = base64.b64encode(img_bytes).decode('utf-8')
 
 
 
 
 
 
 
 
 
 
 
 
17
  else:
18
- encoded_image = None
 
 
 
 
19
 
 
 
 
 
20
  try:
21
- output = client.img2img(prompt=prompt, image=encoded_image, strength=0.75)
22
- img_data = base64.b64decode(output['generated_image'])
23
- return Image.open(io.BytesIO(img_data))
 
 
 
 
 
 
 
 
 
 
 
24
  except Exception as e:
25
- st.error(f"Error en la generación de la imagen: {str(e)}")
 
 
 
 
 
26
  return None
 
 
 
 
 
27
 
28
- # Interfaz de Streamlit
29
- st.title("Generación de Imágenes img2img con Flux")
30
- st.sidebar.header("Opciones de generación")
31
- prompt = st.sidebar.text_input("Escribe tu prompt:", value="Una escena futurista en una ciudad verde")
32
- uploaded_file = st.sidebar.file_uploader("Sube una imagen base (opcional)", type=["jpg", "png"])
33
 
34
- if st.sidebar.button("Generar Imagen"):
35
- if uploaded_file:
36
- init_image = Image.open(uploaded_file).convert("RGB")
37
- st.image(init_image, caption="Imagen Base", use_column_width=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  else:
39
- init_image = None
40
-
41
- with st.spinner('Generando imagen...'):
42
- generated_image = get_image_result(prompt, init_image)
43
- if generated_image:
44
- st.image(generated_image, caption="Imagen Generada", use_column_width=True)
45
- img_bytes = io.BytesIO()
46
- generated_image.save(img_bytes, format="PNG")
47
- st.download_button(label="Descargar imagen", data=img_bytes.getvalue(), file_name="generated_image.png", mime="image/png")
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
 
 
 
2
  from PIL import Image
3
+ import streamlit as st
4
+ from huggingface_hub import InferenceClient, AsyncInferenceClient
5
+ import asyncio
6
+ import os
7
+ import random
8
+ import numpy as np
9
+ import yaml
10
+
11
+ try:
12
+ with open("config.yaml", "r") as file:
13
+ credentials = yaml.safe_load(file)
14
+ except Exception as e:
15
+ st.error(f"Error al cargar el archivo de configuración: {e}")
16
+ credentials = {"username": "", "password": ""}
17
+
18
+ MAX_SEED = np.iinfo(np.int32).max
19
+ client = AsyncInferenceClient()
20
+ llm_client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
21
+ DATA_PATH = Path("./data")
22
+ DATA_PATH.mkdir(exist_ok=True)
23
 
24
+ def authenticate_user(username, password):
25
+ return username == credentials["username"] and password == credentials["password"]
26
+
27
+ async def gen(prompts, width, height, model_name, num_variants=1, use_enhanced=True):
28
+ images = []
29
+ try:
30
+ for idx, prompt in enumerate(prompts):
31
+ seed = random.randint(0, MAX_SEED)
32
+ image, seed = await generate_image(prompt, width, height, seed, model_name)
33
+ image_path = save_image(image, f"generated_image_{seed}.jpg", prompt)
34
+ if image_path:
35
+ st.success(f"Imagen {idx + 1} generada")
36
+ images.append(str(image_path))
37
+ except Exception as e:
38
+ st.error(f"Error al generar imágenes: {e}")
39
+ return images
40
 
41
+ def list_saved_images():
42
+ return sorted(DATA_PATH.glob("*.jpg"), key=os.path.getmtime, reverse=True)
43
+
44
+ def display_gallery():
45
+ st.header("Galería de Imágenes Guardadas")
46
+ images = list_saved_images()
47
+ if images:
48
+ cols = st.columns(8)
49
+ for i, image_file in enumerate(images):
50
+ with cols[i % 8]:
51
+ st.image(str(image_file), caption=image_file.name, use_column_width=True)
52
+ prompt = get_prompt_for_image(image_file.name)
53
+ st.write(prompt[:300])
54
+
55
+ if st.button(f"Borrar", key=f"delete_{i}_{image_file.name}"):
56
+ os.remove(image_file)
57
+ st.success("Imagen borrada")
58
+ display_gallery()
59
  else:
60
+ st.info("No hay imágenes guardadas.")
61
+
62
+ def save_prompt(prompt):
63
+ with open(DATA_PATH / "prompts.txt", "a") as f:
64
+ f.write(prompt + "\n")
65
 
66
+ def run_async(func, *args):
67
+ return asyncio.run(func(*args))
68
+
69
+ async def improve_prompt(prompt):
70
  try:
71
+ instructions = [
72
+ "With my idea create a vibrant description for a detailed txt2img prompt, 300 characters max.",
73
+ "With my idea write a creative and detailed text-to-image prompt in English, 300 characters max.",
74
+ "With my idea generate a descriptive and visual txt2img prompt in English, 300 characters max.",
75
+ "With my idea describe a photorealistic with illumination txt2img prompt in English, 300 characters max.",
76
+ "With my idea give a realistic and elegant txt2img prompt in English, 300 characters max.",
77
+ "With my idea conform a visually dynamic and surreal txt2img prompt in English, 300 characters max.",
78
+ "With my idea realize an artistic and cinematic txt2img prompt in English, 300 characters max.",
79
+ "With my idea make a narrative and immersive txt2img prompt in English, 300 characters max."
80
+ ]
81
+ instruction = random.choice(instructions)
82
+ formatted_prompt = f"{prompt}: {instruction}"
83
+ response = llm_client.text_generation(formatted_prompt, max_new_tokens=100)
84
+ return response['generated_text'][:100] if 'generated_text' in response else response.strip()
85
  except Exception as e:
86
+ return f"Error mejorando el prompt: {e}"
87
+
88
+ def save_image(image, file_name, prompt=None):
89
+ image_path = DATA_PATH / file_name
90
+ if image_path.exists():
91
+ st.warning(f"La imagen '{file_name}' ya existe en la galería. No se guardó.")
92
  return None
93
+ else:
94
+ image.save(image_path, format="JPEG")
95
+ if prompt:
96
+ save_prompt(f"{file_name}: {prompt}")
97
+ return image_path
98
 
99
+ async def generate_image(prompt, width, height, seed, model_name):
100
+ if seed == -1:
101
+ seed = random.randint(0, MAX_SEED)
102
+ image = await client.text_to_image(prompt=prompt, height=height, width=width, model=model_name)
103
+ return image, seed
104
 
105
+ def get_prompt_for_image(image_name):
106
+ prompts = {}
107
+ try:
108
+ with open(DATA_PATH / "prompts.txt", "r") as f:
109
+ for line in f:
110
+ if line.startswith(image_name):
111
+ prompts[image_name] = line.split(": ", 1)[1].strip()
112
+ except FileNotFoundError:
113
+ return "No hay prompt asociado."
114
+ return prompts.get(image_name, "No hay prompt asociado.")
115
+
116
+ def login_form():
117
+ st.title("Iniciar Sesión")
118
+ username = st.text_input("Usuario", value="admin")
119
+ password = st.text_input("Contraseña", value="flux3x", type="password")
120
+ if st.button("Iniciar Sesión"):
121
+ if authenticate_user(username, password):
122
+ st.success("Autenticación exitosa.")
123
+ st.session_state['authenticated'] = True
124
+ else:
125
+ st.error("Credenciales incorrectas. Intenta de nuevo.")
126
+
127
+ async def generate_variations(prompt, num_variants, use_enhanced):
128
+ prompts = set()
129
+ while len(prompts) < num_variants:
130
+ if use_enhanced:
131
+ enhanced_prompt = await improve_prompt(prompt)
132
+ prompts.add(enhanced_prompt)
133
+ else:
134
+ prompts.add(prompt)
135
+ return list(prompts)
136
+
137
+ async def main():
138
+ st.set_page_config(layout="wide")
139
+
140
+ if 'authenticated' not in st.session_state or not st.session_state['authenticated']:
141
+ login_form()
142
+ return
143
+
144
+ st.title("Flux + Multiple Images")
145
+ prompt = st.sidebar.text_area("Descripción de la imagen", height=150, max_chars=500)
146
+ format_option = st.sidebar.selectbox("Formato", ["9:16", "16:9", "1:1"])
147
+ prompt_checkbox = st.sidebar.checkbox("Prompt Enhancer")
148
+ model_option = st.sidebar.selectbox("Modelo", ["black-forest-labs/FLUX.1-schnell", "black-forest-labs/FLUX.1-dev"])
149
+ width, height = (360, 640) if format_option == "9:16" else (640, 360) if format_option == "16:9" else (640, 640)
150
+
151
+ if prompt_checkbox:
152
+ num_variants = st.sidebar.slider("Número de imágenes a generar", 1, 8, 1)
153
+ else:
154
+ num_variants = 1
155
+
156
+ if prompt_checkbox:
157
+ with st.spinner("Generando prompts mejorados..."):
158
+ prompts = await generate_variations(prompt, num_variants, True)
159
  else:
160
+ prompts = [prompt]
161
+
162
+ if st.sidebar.button("Generar Imágenes"):
163
+ with st.spinner("Generando imágenes..."):
164
+ try:
165
+ results = await gen(prompts, width, height, model_option, num_variants, prompt_checkbox)
166
+ st.session_state['generated_image_paths'] = results
167
+ for result in results:
168
+ st.image(result, caption="Imagen Generada")
169
+ except Exception as e:
170
+ st.error(f"Error al generar las imágenes: {str(e)}")
171
+
172
+ display_gallery()
173
+
174
+ if __name__ == "__main__":
175
+ asyncio.run(main())