import io

import streamlit as st
from huggingface_hub import InferenceClient
from PIL import Image

# Configuration
hf_token = st.secrets["HF_TOKEN"]
client = InferenceClient(model="black-forest-labs/FLUX.1-schnell", token=hf_token)


def get_image_result(prompt, init_image=None):
    """Generate an image from a prompt, optionally conditioned on a base image."""
    try:
        if init_image is not None:
            # Serialize the base image to PNG bytes for the img2img request
            img_buffer = io.BytesIO()
            init_image.save(img_buffer, format="PNG")
            # image_to_image returns a PIL.Image
            return client.image_to_image(img_buffer.getvalue(), prompt=prompt)
        # Without a base image, fall back to plain text-to-image generation
        return client.text_to_image(prompt)
    except Exception as e:
        st.error(f"Error while generating the image: {e}")
        return None


# Streamlit interface
st.title("img2img Image Generation with Flux")

st.sidebar.header("Generation options")
prompt = st.sidebar.text_input("Write your prompt:", value="A futuristic scene in a green city")
uploaded_file = st.sidebar.file_uploader("Upload a base image (optional)", type=["jpg", "png"])

if st.sidebar.button("Generate Image"):
    if uploaded_file:
        init_image = Image.open(uploaded_file).convert("RGB")
        st.image(init_image, caption="Base Image", use_column_width=True)
    else:
        init_image = None

    with st.spinner("Generating image..."):
        generated_image = get_image_result(prompt, init_image)

    if generated_image:
        st.image(generated_image, caption="Generated Image", use_column_width=True)
        img_bytes = io.BytesIO()
        generated_image.save(img_bytes, format="PNG")
        st.download_button(
            label="Download image",
            data=img_bytes.getvalue(),
            file_name="generated_image.png",
            mime="image/png",
        )
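
# Usage note (a minimal sketch; the file name app.py and the token value are
# assumptions, not part of the original snippet): st.secrets["HF_TOKEN"] is read
# from .streamlit/secrets.toml, which would contain a line such as
#   HF_TOKEN = "hf_..."
# The app is then started from the project directory with:
#   streamlit run app.py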