## app.py:
import os
from io import BytesIO

import gradio as gr
import requests
from PIL import Image
# Translate Arabic text to English via the Hugging Face Inference API.
# The model is hard-coded to Arabic -> English, so target_language is kept
# only for interface compatibility.
def translate_text(text, target_language='en'):
    API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-ar-en"
    headers = {"Authorization": f"Bearer {os.getenv('API_TOKEN')}"}
    response = requests.post(API_URL, headers=headers, json={"inputs": text})
    if response.status_code == 200:
        return response.json()[0]['translation_text']
    else:
        print("Failed to translate text:", response.text)
        return text  # Return the original text if translation fails
# Post a payload to an Inference API endpoint and return the raw response bytes
def query(payload, API_URL, headers):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.content
# Generate an image from a prompt by calling the selected model's
# Hugging Face Inference API endpoint
def generate_image(prompt, model_choice, translate=False):
    if translate:
        prompt = translate_text(prompt, target_language='en')  # Translate the Arabic prompt to English first
    model_urls = {
        "Stable Diffusion v1.5": "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
        "dalle-3-xl-v2": "https://api-inference.huggingface.co/models/ehristoforu/dalle-3-xl-v2",
        "midjourney-v6": "https://api-inference.huggingface.co/models/Kvikontent/midjourney-v6",
        "openjourney-v4": "https://api-inference.huggingface.co/models/prompthero/openjourney-v4",
        "LCM_Dreamshaper_v7": "https://api-inference.huggingface.co/models/SimianLuo/LCM_Dreamshaper_v7",
    }
    API_URL = model_urls[model_choice]
    headers = {"Authorization": f"Bearer {os.getenv('API_TOKEN')}"}
    payload = {"inputs": prompt}
    data = query(payload, API_URL, headers)
    try:
        # Load the image from the returned byte data
        image = Image.open(BytesIO(data))
        # Resize the image; Gradio can display a PIL image directly
        image = image.resize((400, 400))
        return image
    except Exception as e:
        # The API returns JSON (not image bytes) on errors such as the model still loading
        print("Error processing the image:", e)
        return None  # Return None so Gradio shows an empty output
# Read the Inference API token from the environment (set API_TOKEN as a Space secret)
API_TOKEN = os.getenv("API_TOKEN")
# Styling with custom CSS
css = """
body {background-color: #f0f2f5;}
.gradio-app {background-color: #ffffff; border-radius: 12px; box-shadow: 0 0 12px rgba(0,0,0,0.1);}
button {color: white; background-color: #106BA3; border: none; border-radius: 5px;}
"""
# Define the interface (the UI text is in Arabic)
title = "نموذج توليد الصور"  # "Image Generation Model"
description = "اكتب وصف للصورة التي تود من النظام التوليدي انشاءها"  # "Write a description of the image you want the generative system to create"
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.components.Textbox(lines=2, placeholder="Enter the description of the image here..."),
        gr.components.Dropdown(
            choices=["Stable Diffusion v1.5", "dalle-3-xl-v2", "midjourney-v6", "openjourney-v4", "LCM_Dreamshaper_v7"],
            label="Choose Model",
            value='Stable Diffusion v1.5',
        ),
        gr.components.Checkbox(label="Translate The Text Before Generating Image", value=False),
    ],
    outputs=gr.components.Image(),
    title=title,
    description=description,
    theme="default",
    css=css,
)
# Launch the interface
iface.launch()
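
The app relies on an `API_TOKEN` environment variable (a Hugging Face access token) for every Inference API call. Below is a minimal sketch of the same request pattern used in `query`/`generate_image`, handy for checking the token and endpoint outside Gradio; the prompt text and output filename are placeholders, not part of the app.

```python
# Standalone smoke test of the Inference API call the app makes
# (sketch only; assumes API_TOKEN is exported in the environment).
import os
from io import BytesIO

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
headers = {"Authorization": f"Bearer {os.getenv('API_TOKEN')}"}

# Same payload shape the app sends: {"inputs": <prompt>}
resp = requests.post(API_URL, headers=headers, json={"inputs": "a cat reading a book"})
resp.raise_for_status()

# On success the body is raw image bytes, which PIL can open directly
Image.open(BytesIO(resp.content)).save("sample.png")
```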