import gradio as gr
from random import randint
from all_models import models
from externalmod import gr_Interface_load
import asyncio
from threading import RLock
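# Reentrant lock used to serialize PIL image decoding and shared gallery updates
# across Gradio's worker threads.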
lock = RLock()

def load_fn(models):
    """Build a Gradio interface for each model name; fall back to a no-op interface if loading fails."""
    global models_load
    models_load = {}

    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}')
            except Exception as error:
                print(error)
                # Placeholder interface so the rest of the UI keeps working for this slot.
                m = gr.Interface(lambda: None, ['text'], ['image'])
            models_load[model] = m


load_fn(models)


num_models = 6                        # number of image slots rendered side by side
default_models = models[:num_models]  # initially selected models
timeout = 300                         # per-model inference timeout, in seconds

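# extend_choices pads (or truncates) the user's selection to exactly num_models
# entries, filling empty slots with the placeholder 'NA'; the update_* helpers
# rebuild the per-model output components so that 'NA' slots stay hidden.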
def extend_choices(choices):
    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']


def update_imgbox(choices):
    choices_plus = extend_choices(choices[:num_models])
    return [gr.Image(None, label = m, visible = (m != 'NA')) for m in choices_plus]


def update_imgbox_gallery(choices):
    choices_plus = extend_choices(choices[:num_models])
    return [gr.Gallery(None, label = m, visible = (m != 'NA')) for m in choices_plus]


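# Run a single model call in a worker thread, cancel it if it exceeds `timeout`
# seconds, and return the generated image as an RGBA PIL image (None on
# failure or timeout).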
async def infer(model_str, prompt, timeout):
    from PIL import Image
    # Pad the prompt with a random amount of whitespace, presumably so repeated
    # requests with the same prompt do not return an identical cached result.
    noise = " " * randint(1, 500)
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str], f'{prompt} {noise}'))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task failed or timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            image = Image.open(result).convert('RGBA')
        return image
    return None

def gen_fn(model_str, prompt):
    if model_str == 'NA':
        return None
    # Run the async inference on a private event loop so it can be called from
    # Gradio's synchronous callback threads.
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result


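# Prepend a newly generated image (captioned with its model name) to the shared
# gallery; wired to the change event of each per-model image slot.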
def add_gallery(image, model_str, gallery):
    if gallery is None: gallery = []
    with lock:
        if image is not None: gallery.insert(0, (image, model_str))
    return gallery


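# Generator variant of gen_fn that yields the gallery with the new image inserted
# at the front; currently only referenced by the commented-out wiring below.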
def gen_fn_gallery(model_str, prompt, gallery):
    if gallery is None: gallery = []
    if model_str == 'NA':
        yield gallery
        return
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, timeout))
        with lock:
            if result: gallery.insert(0, result)
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
    finally:
        loop.close()
    yield gallery


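# Custom CSS: constrain the page width and size the per-model thumbnails and the
# shared gallery.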
CSS="""
#container { max-width: 1200px; margin: 0 auto !important; }
.output { width: 112px !important; height: 112px !important; }
.gallery { width: 100% !important; min-height: 768px !important; }
.guide { text-align: center !important; }
"""

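# UI layout: a prompt box with Generate/Stop buttons, six per-model image slots,
# a combined gallery, and a checkbox group for choosing which models to run.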
with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
    gr.HTML(
    """
        <div>
        <p> <center>For simultaneous generations without hidden queue check out <a href="https://huggingface.co/spaces/Yntec/ToyWorld">Toy World</a>! For more options like single model x6 check out <a href="https://huggingface.co/spaces/John6666/Diffusion80XX4sg">Diffusion80XX4sg</a> by John6666!</center>
        </p></div>
    """
    )
    with gr.Tab('Huggingface Diffusion'):
        with gr.Column(scale=2):
            txt_input = gr.Textbox(label='Your prompt:', lines=4)
            with gr.Row():
                gen_button = gr.Button('Generate up to 6 images (takes from 1 to 18 minutes in total)', scale=2)
                stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
                gen_button.click(lambda: gr.update(interactive = True), None, stop_button)
            gr.Markdown("Scroll down to see more images and select models.", elem_classes="guide")

        with gr.Column(scale=1):
            with gr.Group():
                with gr.Row():
                    output = [gr.Image(label=m, show_download_button=True, elem_classes="output", interactive=False, min_width=80, show_share_button=False, visible=True) for m in default_models]
                    #output = [gr.Image(label=m, show_download_button=True, elem_classes="output", interactive=False, show_share_button=True) for m in default_models]
                    #output = [gr.Gallery(label=m, show_download_button=True, elem_classes="output", interactive=False, show_share_button=True, container=True, format="png", object_fit="cover") for m in default_models]
                    current_models = [gr.Textbox(m, visible=False) for m in default_models]

        with gr.Column(scale=2):
            gallery = gr.Gallery(label="Output", show_download_button=True, elem_classes="gallery",
                                interactive=False, show_share_button=True, container=True, format="png",
                                preview=True, object_fit="cover", columns=2, rows=2) 

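        # Wire each hidden model-name textbox to its image slot: clicking Generate
        # (or submitting the prompt) runs gen_fn, every finished image is pushed
        # into the gallery, and Stop cancels the pending generation events.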
        for m, o in zip(current_models, output):
            #gen_event = gen_button.click(gen_fn, [m, txt_input], o)
            #gen_event = gen_button.click(gen_fn_gallery, [m, txt_input, o], o)
            gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fn, inputs=[m, txt_input], outputs=[o])
            o.change(add_gallery, [o, m, gallery], [gallery])
            stop_button.click(lambda: gr.update(interactive = False), None, stop_button, cancels = [gen_event])

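        # Model picker: changing the selection rebuilds the image slots and refreshes
        # the hidden model-name textboxes that drive generation.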
        with gr.Column(scale=4):
            with gr.Accordion('Model selection'):
                model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the {len(models)} available!', value=default_models, interactive=True)
                model_choice.change(update_imgbox, model_choice, output)
                #model_choice.change(update_imgbox_gallery, model_choice, output)
                model_choice.change(extend_choices, model_choice, current_models)

    gr.Markdown("Based on the [TestGen](https://huggingface.co/spaces/derwahnsinn/TestGen) Space by derwahnsinn, the [SpacIO](https://huggingface.co/spaces/RdnUser77/SpacIO_v1) Space by RdnUser77 and Omnibus's Maximum Multiplier!")

demo.queue()
demo.launch()