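# Gradio demo that generates images from text prompts using a selectable set of
# Hugging Face text-to-image models. Model loading, inference, and prompt-preset
# helpers are provided by the multit2i module.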
import gradio as gr
from multit2i import (
    load_models,
    find_model_list,
    infer_multi,
    infer_multi_random,
    save_gallery_images,
    change_model,
    get_model_info_md,
    loaded_models,
    get_positive_prefix,
    get_positive_suffix,
    get_negative_prefix,
    get_negative_suffix,
    get_recom_prompt_type,
    set_recom_prompt_preset,
)


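# Hugging Face model repositories to preload for this demo.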
models = [
    'yodayo-ai/kivotos-xl-2.0',
    'yodayo-ai/holodayo-xl-2.1',
    'cagliostrolab/animagine-xl-3.1',
    'votepurchase/ponyDiffusionV6XL',
    'eienmojiki/Anything-XL',
    'eienmojiki/Starry-XL-v5.2',
    'digiplay/majicMIX_sombre_v2',
    'digiplay/majicMIX_realistic_v7',
    'votepurchase/counterfeitV30_v30',
    'Meina/MeinaMix_V11',
    'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
    'kayfahaarukku/UrangDiffusion-1.1',
    'Raelina/Rae-Diffusion-XL-V2',
    'Raelina/Raemu-XL-V4',
]


# Examples of other ways to populate the model list:
#models = ['yodayo-ai/kivotos-xl-2.0', 'yodayo-ai/holodayo-xl-2.1'] # specific models
#models = find_model_list("John6666", [], "", "last_modified", 20) # John6666's 20 most recently updated models
#models = find_model_list("John6666", ["anime"], "", "last_modified", 20) # John6666's 20 most recently updated models tagged 'anime'
#models = find_model_list("John6666", [], "anime", "last_modified", 20) # John6666's 20 most recently updated models not tagged 'anime'
#models = find_model_list("", [], "", "last_modified", 20) # the 20 most recently updated text-to-image models on Hugging Face
#models = find_model_list("", [], "", "downloads", 20) # the 20 most downloaded text-to-image models on Hugging Face (by monthly downloads)


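# Preload the models listed above; the second argument is how many models are fetched concurrently.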
load_models(models, 10)
#load_models(models, 20) # Fetch 20 models at a time (default: 5).


css = """"""

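# Build the UI: prompt presets, model selector, prompt inputs, result gallery, and example prompts.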
with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
    with gr.Column():
        with gr.Accordion("Advanced settings", open=False):
            with gr.Accordion("Recommended Prompt"):
                recom_prompt_preset = gr.Radio(label="Set Presets", choices=get_recom_prompt_type(), value="Common")
                positive_prefix = gr.CheckboxGroup(label="Use Positive Prefix", choices=get_positive_prefix(), value=[])
                positive_suffix = gr.CheckboxGroup(label="Use Positive Suffix", choices=get_positive_suffix(), value=["Common"])
                negative_prefix = gr.CheckboxGroup(label="Use Negative Prefix", choices=get_negative_prefix(), value=[], visible=False)
                negative_suffix = gr.CheckboxGroup(label="Use Negative Suffix", choices=get_negative_suffix(), value=["Common"], visible=False)
        with gr.Group():
            model_name = gr.Dropdown(label="Select Model", choices=list(loaded_models.keys()), value=list(loaded_models.keys())[0])
            model_info = gr.Markdown(value=get_model_info_md(list(loaded_models.keys())[0]))
        prompt = gr.Text(label="Prompt", lines=1, max_lines=8, placeholder="1girl, solo, ...")
        neg_prompt = gr.Text(label="Negative Prompt", lines=1, max_lines=8, placeholder="", visible=False)
        with gr.Row():
            run_button = gr.Button("Generate Image", scale=2)
            random_button = gr.Button("Random Model 🎲", scale=1)
            image_num = gr.Number(label="Number of images", minimum=1, maximum=16, value=1, step=1, interactive=True, scale=1)
        results = gr.Gallery(label="Gallery", interactive=False, show_download_button=True, show_share_button=False,
                              container=True, format="png", object_fit="contain")
        image_files = gr.Files(label="Download", interactive=False)
        clear_results = gr.Button("Clear Gallery / Download")
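    # Example prompts; selecting one fills the prompt box.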
    examples = gr.Examples(
        examples = [
            ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
            ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
            ["kafuu chino, 1girl, solo"],
            ["1girl"],
            ["beautiful sunset"],
        ],
        inputs=[prompt],
    )
    gr.Markdown(
        """This demo was created with reference to the following demos:

- [Nymbo/Flood](https://huggingface.co/spaces/Nymbo/Flood)

- [Yntec/ToyWorldXL](https://huggingface.co/spaces/Yntec/ToyWorldXL)

<br>The first startup takes a very long time, but subsequent startups are much faster.

This is due to the time Gradio needs to generate and cache the example images.
"""
    )
    gr.DuplicateButton(value="Duplicate Space")

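    # Event wiring: changing the model updates the info panel; the Generate and
    # Random Model buttons run inference and then save the gallery images as
    # downloadable files.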
    model_name.change(change_model, [model_name], [model_info], queue=False, show_api=False)
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer_multi,
        inputs=[prompt, neg_prompt, results, image_num, model_name,
                 positive_prefix, positive_suffix, negative_prefix, negative_suffix],
        outputs=[results],
        queue=True,
        show_progress="full",
        show_api=True,
    ).success(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
    gr.on(
        triggers=[random_button.click],
        fn=infer_multi_random,
        inputs=[prompt, neg_prompt, results, image_num,
                 positive_prefix, positive_suffix, negative_prefix, negative_suffix],
        outputs=[results],
        queue=True,
        show_progress="full",
        show_api=True,
    ).success(save_gallery_images, [results], [results, image_files], queue=False, show_api=False)
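    # Clearing resets both the gallery and the file list; preset changes update the prefix/suffix checkbox groups.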
    clear_results.click(lambda: (None, None), None, [results, image_files], queue=False, show_api=False)
    recom_prompt_preset.change(set_recom_prompt_preset, [recom_prompt_preset],
     [positive_prefix, positive_suffix, negative_prefix, negative_suffix], queue=False, show_api=False)

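# Enable request queueing and launch the app.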
demo.queue()
demo.launch()