UI update #1
by medmac01 - opened

app.py CHANGED
@@ -5,28 +5,6 @@ from PIL import Image
 
 from model import *
 
-# SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", "0") == "1"
-
-# Constants
-# base = "stabilityai/stable-diffusion-xl-base-1.0"
-# repo = "ByteDance/SDXL-Lightning"
-# checkpoints = {
-# "1-Step" : ["sdxl_lightning_1step_unet_x0.safetensors", 1],
-# "2-Step" : ["sdxl_lightning_2step_unet.safetensors", 2],
-# "4-Step" : ["sdxl_lightning_4step_unet.safetensors", 4],
-# "8-Step" : ["sdxl_lightning_8step_unet.safetensors", 8],
-# }
-# loaded = None
-
-
-# Ensure model and scheduler are initialized in GPU-enabled function
-# if torch.cuda.is_available():
-# pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
-
-
-# Function
-# @spaces.GPU(enable_queue=True)
-
 def generate_image(prompt):
 
     return prompt_to_img(prompt)[0]
@@ -35,19 +13,18 @@ def generate_image(prompt):
 
 # Gradio Interface
 description = """
-This demo utilizes
-
+This demo utilizes a specialized variant of the Stable Diffusion model designed for multilingual text-to-image synthesis. In response to the observed underperformance of existing models on languages beyond English, this project introduces the Multilingual Stable Diffusion, providing a more inclusive solution for diverse linguistic contexts.
+Link to Github repo: https://github.com/NajlaaNawaii/Multilingual-Stable-Diffusion-Towards-more-Inclusive-Text-To-Image-Synthesis
 """
 
 with gr.Blocks(css="style.css") as demo:
-    gr.HTML("<h1><center>
+    gr.HTML("<h1><center>Multilingual Stable Diffusion 🧨</center></h1>")
     gr.Markdown(description)
     with gr.Group():
         with gr.Row():
-            prompt = gr.Textbox(label='Enter your prompt
-            ckpt = gr.Dropdown(label='Select inference steps',choices=['1-Step', '2-Step', '4-Step', '8-Step'], value='4-Step', interactive=True)
+            prompt = gr.Textbox(label='Enter your prompt', scale=8)
             submit = gr.Button(scale=1, variant='primary')
-            img = gr.Image(label='
+            img = gr.Image(label='Generated Image')
 
     prompt.submit(fn=generate_image,
                   inputs=[prompt],
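
Note for reviewers: after this change app.py depends entirely on prompt_to_img from model.py (pulled in via "from model import *"), which is not part of this diff. Below is a minimal sketch of the interface the UI assumes; the checkpoint name, pipeline class, and helper structure are illustrative assumptions, not the repo's actual model.py.

# Hypothetical sketch of the model.py interface that app.py relies on.
# Checkpoint and pipeline choices here are placeholders, not the repo's implementation.
import torch
from diffusers import StableDiffusionPipeline

_pipe = None  # cache the pipeline so it is loaded only once


def _get_pipe():
    global _pipe
    if _pipe is None:
        _pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",  # placeholder checkpoint
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        )
        if torch.cuda.is_available():
            _pipe = _pipe.to("cuda")
    return _pipe


def prompt_to_img(prompt):
    # Returns a list of PIL images; generate_image() in app.py uses the first element.
    return _get_pipe()(prompt).images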