# Hugging Face Spaces page header captured with the file scrape:
#   Spaces: Running on Zero
#   File size: 10,564 Bytes
import spaces
import gradio as gr
from utils import gradio_copy_text, COPY_ACTION_JS
from tagger import convert_danbooru_to_e621_prompt, insert_recom_prompt
from genimage import generate_image
from llmdolphin import (get_llm_formats, get_dolphin_model_format,
get_dolphin_models, get_dolphin_model_info, select_dolphin_model,
select_dolphin_format, add_dolphin_models, get_dolphin_sysprompt,
get_dolphin_sysprompt_mode, select_dolphin_sysprompt, get_dolphin_languages,
select_dolphin_language, dolphin_respond, dolphin_parse, respond_playground)
# Minimal page CSS: centers any element tagged with the "title" class
# (the gr.Markdown headers below opt in via elem_classes="title").
css = """
.title { text-align: center; }
"""
# Application UI: one Gradio Blocks app with two tabs plus all event wiring.
#   Tab 1 "Prompt Translator": chat with a GGUF LLM (llmdolphin helpers) to
#     turn natural-language text into SD prompt tags, convert them to the
#     Pony/e621 style, and optionally render an image (genimage.generate_image).
#   Tab 2 "GGUF-Playground": a plain gr.ChatInterface over the same model list.
# delete_cache=(60, 3600): purge cached files every 60 s once older than 1 h.
with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=css, delete_cache=(60, 3600)) as app:
    with gr.Tab("Prompt Translator"):
        with gr.Column():
            gr.Markdown("""# Natural Text to SD Prompt Translator With LLM alpha
Text in natural language (English, Japanese, ...) => Prompt
""", elem_classes="title")
            with gr.Group():
                # Conversation view plus the message-entry row.
                chatbot = gr.Chatbot(show_copy_button=True, show_share_button=False, layout="bubble", container=True)
                with gr.Row():
                    chat_msg = gr.Textbox(show_label=False, placeholder="Input text in English, Japanese, or any other languages and press Enter or click Send.", scale=4)
                    chat_submit = gr.Button("Send", scale=1, variant="primary")
                    chat_clear = gr.Button("Clear", scale=1, variant="secondary")
                # LLM sampling parameters; forwarded to dolphin_respond in the
                # gr.on(...) wiring at the bottom of this Blocks context.
                with gr.Accordion("Additional inputs", open=False):
                    chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
                    chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
                    with gr.Row():
                        chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
                        chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                        chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                        chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                        chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
                # Register an extra GGUF model by URL or repo id (handled by
                # add_dolphin_models via the gr.on wiring below).
                with gr.Accordion("Add models", open=False):
                    chat_add_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/mradermacher/MagnumChronos-i1-GGUF/blob/main/MagnumChronos.i1-Q4_K_M.gguf", lines=1)
                    chat_add_format = gr.Dropdown(choices=get_llm_formats(), value=get_llm_formats()[0], label="Message format")
                    chat_add_submit = gr.Button("Update lists of models")
                # Model, system-prompt mode, and output-language selectors.
                with gr.Accordion("Modes", open=True):
                    chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
                    chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
                    with gr.Row():
                        chat_mode = gr.Dropdown(choices=get_dolphin_sysprompt_mode(), value=get_dolphin_sysprompt_mode()[0], allow_custom_value=False, label="Mode")
                        chat_lang = gr.Dropdown(choices=get_dolphin_languages(), value="English", allow_custom_value=True, label="Output language")
            # Parsed prompt outputs: Danbooru-style tags and the Pony/e621
            # conversion, each with a copy button enabled by dolphin_parse.
            with gr.Row():
                with gr.Group():
                    output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
                    copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                with gr.Group():
                    output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
                    copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
            # Hidden plumbing components (all visible=False) used only as
            # inputs/outputs of the event chain below.
            # NOTE(review): "reccomended" in the two labels is a typo for
            # "recommended" — harmless here since the labels are never shown.
            with gr.Accordion(label="Advanced options", open=False, visible=False):
                tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
                dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
                dummy_np_pony = gr.Textbox(label="Negative prompt", value="", visible=False)
                recom_animagine = gr.Textbox(label="Animagine reccomended prompt", value="Animagine", visible=False)
                recom_pony = gr.Textbox(label="Pony reccomended prompt", value="Pony", visible=False)
            generate_image_btn = gr.Button(value="GENERATE IMAGE", size="lg", variant="primary")
            with gr.Row():
                result_image = gr.Gallery(label="Generated images", columns=1, object_fit="contain", container=True, preview=True, show_label=False, show_share_button=False, show_download_button=True, interactive=False, visible=True, format="png")
    with gr.Tab("GGUF-Playground"):
        gr.Markdown("""# Chat with lots of Models and LLMs using llama.cpp
This tab is copy of [CaioXapelaum/GGUF-Playground](https://huggingface.co/spaces/CaioXapelaum/GGUF-Playground).<br>
Don't worry about the strange appearance, **it's just a bug of Gradio!**""", elem_classes="title")
        # NOTE(review): `likeable` (and the retry/undo/clear/submit _btn args
        # on gr.ChatInterface below) exist in Gradio 4.x and were removed in
        # Gradio 5 — confirm the Space pins gradio<5.
        pg_chatbot = gr.Chatbot(scale=1, likeable=False, show_copy_button=True, show_share_button=False)
        # Per-message extra inputs for the playground chat (model, system
        # message, sampling parameters) — injected via additional_inputs.
        with gr.Accordion("Additional inputs", open=False):
            pg_chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
            pg_chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
            pg_chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
            pg_chat_sysmsg = gr.Textbox(value="You are a helpful assistant.", label="System message")
            with gr.Row():
                pg_chat_tokens = gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens")
                pg_chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                pg_chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                pg_chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                pg_chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
        with gr.Accordion("Add models", open=True):
            pg_chat_add_text = gr.Textbox(label="URL or Repo ID", placeholder="https://huggingface.co/mradermacher/MagnumChronos-i1-GGUF/blob/main/MagnumChronos.i1-Q4_K_M.gguf", lines=1)
            pg_chat_add_format = gr.Dropdown(choices=get_llm_formats(), value=get_llm_formats()[0], label="Message format")
            pg_chat_add_submit = gr.Button("Update lists of models")
        # Free-form chat over the selected GGUF model via respond_playground.
        gr.ChatInterface(
            fn=respond_playground,
            #title="Chat with lots of Models and LLMs using llama.cpp",
            retry_btn="Retry",
            undo_btn="Undo",
            clear_btn="Clear",
            submit_btn="Send",
            #additional_inputs_accordion='gr.Accordion(label="Additional Inputs", open=False)',
            additional_inputs=[pg_chat_model, pg_chat_sysmsg, pg_chat_tokens, pg_chat_temperature, pg_chat_topp, pg_chat_topk, pg_chat_rp],
            chatbot=pg_chatbot
        )
    gr.LoginButton()
    gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
    # --- Event wiring: Prompt Translator tab ---
    # Send (Enter or button): stream the LLM reply into the chatbot; on
    # success parse it into tags (enabling the copy buttons), convert to
    # e621 style, then prepend the recommended prompts to both output boxes.
    gr.on(
        triggers=[chat_msg.submit, chat_submit.click],
        fn=dolphin_respond,
        inputs=[chat_msg, chatbot, chat_model, chat_sysmsg, chat_tokens, chat_temperature, chat_topp, chat_topk, chat_rp],
        outputs=[chat_msg, chatbot],
        queue=True,
        show_progress="full",
        trigger_mode="once",
    ).success(dolphin_parse, [chatbot], [output_text, copy_btn, copy_btn_pony]).success(
        convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False,
    ).success(insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False,
    ).success(insert_recom_prompt, [output_text_pony, dummy_np_pony, recom_pony], [output_text_pony, dummy_np_pony], queue=False)
    chat_clear.click(lambda: None, None, chatbot, queue=False)
    # Changing model or message format also resets the conversation.
    chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full")\
    .success(lambda: None, None, chatbot, queue=False)
    chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False)\
    .success(lambda: None, None, chatbot, queue=False)
    chat_mode.change(select_dolphin_sysprompt, [chat_mode], [chat_sysmsg], queue=False)
    chat_lang.change(select_dolphin_language, [chat_lang], [chat_sysmsg], queue=False)
    gr.on(
        triggers=[chat_add_text.submit, chat_add_submit.click],
        fn=add_dolphin_models,
        inputs=[chat_add_text, chat_add_format],
        outputs=[chat_model],
        queue=False,
        trigger_mode="once",
    )
    copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS)
    copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS)
    generate_image_btn.click(generate_image, [output_text, dummy_np], [result_image], show_progress="full")
    # --- Event wiring: GGUF-Playground tab ---
    # The chatbot reset on model/format change is intentionally disabled
    # here (kept as commented-out continuations).
    pg_chat_model.change(select_dolphin_model, [pg_chat_model], [pg_chat_model, pg_chat_format, pg_chat_model_info], queue=True, show_progress="full")#\
    #.success(lambda: None, None, pg_chatbot, queue=False)
    pg_chat_format.change(select_dolphin_format, [pg_chat_format], [pg_chat_format], queue=False)#\
    #.success(lambda: None, None, pg_chatbot, queue=False)
    gr.on(
        triggers=[pg_chat_add_text.submit, pg_chat_add_submit.click],
        fn=add_dolphin_models,
        inputs=[pg_chat_add_text, pg_chat_add_format],
        outputs=[pg_chat_model],
        queue=False,
        trigger_mode="once",
    )
if __name__ == "__main__":
    # Enable request queueing, then start the server; Blocks.queue()
    # returns the app itself, so the two calls chain fluently.
    app.queue().launch()