# client/app.py
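# Gradio web client for NovelAI image generation, intended to run as a
# Hugging Face Space; generated images are synced to the P01yH3dr0n/naimages
# dataset repo.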
import os
import datetime
import toml
import gradio as gr
import shutil
from huggingface_hub import HfApi, snapshot_download
from pnginfo import read_info_from_image, send_paras
from images_history import img_history_ui
from director_tools import director_ui, send_outputs
from tagger import tagger_ui
from utils import set_token, generate_novelai_image, image_from_bytes, get_remain_anlas, calculate_cost
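
# Client-side settings (default quality/negative prompts, local save path) come
# from the [client] section of config.toml. today/today_count track how many
# images have been generated on the current date; HfApi is used for uploads.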
client_config = toml.load("config.toml")['client']
today_count = 0
today = datetime.date.today().strftime('%Y-%m-%d')
api = HfApi()
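
# Return the number of images generated today, resetting the counter when the date changes.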
def get_count():
    global today_count, today
    now = datetime.date.today().strftime('%Y-%m-%d')
    if now != today:
        today = now
        today_count = 0
    return today_count
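
# Pick the default noise schedule for the chosen sampler; ddim_v3 only supports
# 'native', so the scheduler dropdown is locked in that case.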
def change_schedule(sampler):
    if sampler == "ddim_v3":
        return gr.Dropdown(value="native", interactive=False)
    elif sampler == "k_euler_ancestral":
        return gr.Dropdown(value="native", interactive=True)
    elif sampler == "k_dpmpp_2m":
        return gr.Dropdown(value="exponential", interactive=True)
    else:
        return gr.Dropdown(value="karras", interactive=True)
def control_ui():
    prompt = gr.TextArea(elem_id='txt2img_prompt', label="提示词", lines=3)
    quality_tags = gr.TextArea(
        elem_id='txt2img_qua_prompt', label="质量词", lines=1,
        value=client_config['default_quality'],
    )
    neg_prompt = gr.TextArea(
        elem_id='txt2img_neg_prompt', label="负面词", lines=1,
        value=client_config['default_neg'],
    )
    prompt.focus(fn=None, inputs=None, js="() => {run();}")
    quality_tags.focus(fn=None, inputs=None, js="() => {run();}")
    neg_prompt.focus(fn=None, inputs=None, js="() => {run();}")
    with gr.Accordion("参数设置", open=False):
        with gr.Row():
            sampler = gr.Dropdown(
                choices=[
                    "k_euler", "k_euler_ancestral", "k_dpmpp_2s_ancestral", "k_dpmpp_2m_sde",
                    "k_dpmpp_2m", "k_dpmpp_sde", "ddim_v3"
                ],
                value="k_euler",
                label="采样器",
                interactive=True
            )
            scale = gr.Slider(label="CFG Scale", value=5.0, minimum=0, maximum=10, step=0.1)
            steps = gr.Slider(label="步数", value=28, minimum=1, maximum=50, step=1)
        with gr.Row():
            seed = gr.Number(label="种子", value=-1, step=1, maximum=2**32-1, minimum=-1, scale=3)
            rand_seed = gr.Button('🎲️', scale=1)
            reuse_seed = gr.Button('♻️', scale=1)
        with gr.Row():
            width = gr.Slider(label="宽度", value=1024, minimum=64, maximum=2048, step=64)
            height = gr.Slider(label="高度", value=1024, minimum=64, maximum=2048, step=64)
    with gr.Row():
        with gr.Column():
            with gr.Accordion('风格迁移', open=False) as vibe_tab:
                ref_images = gr.Gallery(label="上传单(多)张图片", format="png", value=None, interactive=True, type="pil", show_share_button=False)
                info_extracts = gr.State([])
                ref_strs = gr.State([])

                @gr.render(inputs=ref_images, triggers=[ref_images.upload, ref_images.change])
                def multiple_vibes(images):
                    if images is None:
                        return
                    else:
                        exts = []
                        strs = []
                        for i, _ in enumerate(images):
                            with gr.Row():
                                extract = gr.Slider(label=f"图片{i + 1} 参考信息提取", value=1, minimum=0, maximum=1, step=0.1, interactive=True)
                                strength = gr.Slider(label=f"图片{i + 1} 参考强度", value=0.6, minimum=0, maximum=1, step=0.1, interactive=True)
                                exts.append(extract)
                                strs.append(strength)

                        def dynparas(*args):
                            return list(args)

                        for e, s in zip(exts, strs):
                            e.change(fn=dynparas, inputs=exts, outputs=info_extracts)
                            s.change(fn=dynparas, inputs=strs, outputs=ref_strs)

                ref_images.change(lambda l: ([], []) if l is None else ([1]*len(l), [0.6]*len(l)), inputs=ref_images, outputs=[info_extracts, ref_strs])
                ref_images.upload(lambda l: ([], []) if l is None else ([1]*len(l), [0.6]*len(l)), inputs=ref_images, outputs=[info_extracts, ref_strs])
            with gr.Accordion('附加输入', open=False, elem_id="i2i_tab") as i2i_tab:
                with gr.Tab('图生图', elem_id="i2i_block") as i2i:
                    i2i_image = gr.Image(label="上传图片", value=None, sources=["upload", "clipboard", "webcam"], interactive=True, type="pil", show_share_button=False)
                    i2i_str = gr.Slider(label='去噪强度', value=0.7, minimum=0, maximum=0.99, step=0.01)
                    i2i_noise = gr.Slider(label='噪声', value=0, minimum=0, maximum=1, step=0.1)
                    reuse_img_i2i = gr.Button(value='使用上一次生成的图片')
                with gr.Tab('局部重绘', elem_id="inp_block") as inp:
                    overlay = gr.Checkbox(label='覆盖原图', value=True)
                    inp_img = gr.ImageMask(label="上传图片", value=None, sources=["upload", "clipboard", "webcam"], interactive=True, type="pil", eraser=False, transforms=None, brush=gr.Brush(colors=['#FFFFFF'], color_mode='fixed'), layers=False, show_share_button=False)
                    reuse_img_inp = gr.Button(value='使用上一次生成的图片')
                selection = gr.Radio(choices=['i2i', 'inp'], value='i2i', visible=False)
    with gr.Row():
        with gr.Column():
            with gr.Accordion('高级选项', open=False):
                scheduler = gr.Dropdown(
                    choices=[
                        "native", "karras", "exponential", "polyexponential"
                    ],
                    value="karras",
                    label="Scheduler",
                    interactive=True
                )
                with gr.Row():
                    smea = gr.Checkbox(False, label="SMEA")
                    dyn = gr.Checkbox(False, label="SMEA DYN")
                    variety = gr.Checkbox(False, label="Variety+")
                with gr.Row():
                    dyn_threshold = gr.Checkbox(False, label="Decrisp")
                    cfg_rescale = gr.Slider(0, 1, 0, step=0.01, label="CFG rescale")
        with gr.Column():
            save = gr.Checkbox(value=True, label='云端保存图片')
            gen_btn = gr.Button(value="生成", variant="primary")
            stop_btn = gr.Button(value="取消", variant="stop", visible=False)
    sampler.change(change_schedule, sampler, scheduler)
    rand_seed.click(fn=lambda: -1, inputs=None, outputs=seed)
    i2i.select(lambda: 'i2i', inputs=None, outputs=selection)
    inp.select(lambda: 'inp', inputs=None, outputs=selection)
    return gen_btn, stop_btn, [prompt, quality_tags, neg_prompt, seed, scale, width, height, steps, sampler, scheduler, smea, dyn, dyn_threshold, cfg_rescale, variety, ref_images, info_extracts, ref_strs, i2i_image, i2i_str, i2i_noise, overlay, inp_img, selection], [save, rand_seed, reuse_seed, reuse_img_i2i, reuse_img_inp, vibe_tab, i2i_tab]
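
# Run one generation: refresh the API token from the environment, call the
# NovelAI wrapper and return the decoded image together with the request payload;
# if the API does not return image bytes, the preview is cleared instead.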
def generate(prompt, quality_tags, neg_prompt, seed, scale, width, height, steps, sampler, scheduler, smea, dyn, dyn_threshold, cfg_rescale, variety, ref_images, info_extracts, ref_strs, i2i_image, i2i_str, i2i_noise, overlay, inp_img, selection):
    global today_count
    set_token(os.environ.get('token'))
    img_data, payload = generate_novelai_image(
        f"{prompt}, {quality_tags}", neg_prompt, seed, scale,
        width, height, steps, sampler, scheduler,
        smea, dyn, dyn_threshold, cfg_rescale, variety, ref_images, info_extracts, ref_strs,
        i2i_image, i2i_str, i2i_noise, overlay, inp_img, selection
    )
    if not isinstance(img_data, bytes):
        return gr.Image(value=None), payload
    today_count = get_count() + 1
    img = image_from_bytes(img_data)
    return img, payload
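
# Preview panel: the generated image, a button to send it to the Director tools
# tab (only visible once an image exists), and the generation info as JSON.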
def preview_ui():
    with gr.Blocks(css='#preview_image { height: 100%;}'):
        image = gr.Image(format='png', elem_id='preview_image', interactive=False, type='filepath', show_share_button=False)
        send_dtool = gr.Button(value="发送到定向修图", visible=False)
        image.change(lambda i: gr.Button(visible=False) if i is None else gr.Button(visible=True), inputs=image, outputs=send_dtool)
        info = gr.JSON(value={}, label="生成信息")
    return image, info, send_dtool
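
# Rename the generated file to <daily index>-<seed>.png; when cloud saving is
# enabled, also copy it into a per-day folder and upload that folder to the
# P01yH3dr0n/naimages dataset repo.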
def rename_save_img(path, payload, save):
    if path is None:
        return None
    default = os.path.basename(path)
    filename = str(today_count).rjust(5, '0') + '-' + str(payload['parameters']['seed']) + '.png'
    renamed_path = path.replace(default, filename)
    if os.path.exists(renamed_path):
        return renamed_path
    os.replace(path, renamed_path)
    if save:
        save_path = client_config['save_path']
        today = datetime.date.today().strftime('%Y-%m-%d')
        today_path = os.path.join(save_path, today)
        os.makedirs(today_path, exist_ok=True)
        shutil.copy(renamed_path, os.path.join(today_path, filename))
        api.upload_folder(folder_path=today_path, path_in_repo=today_path, repo_id="P01yH3dr0n/naimages", repo_type="dataset", token=os.environ.get("hf_token"))
    return renamed_path
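
# Show the estimated Anlas cost on the generate button whenever a relevant
# parameter changes; for img2img and inpaint requests the SMEA flags are not
# passed to calculate_cost.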
def update_btn_cost(w, h, s, sm, dyn, i2i_img, i2i_str, inp_img, selection):
    if selection == 'i2i' and i2i_img is not None:
        cost = calculate_cost(w, h, s, False, False, i2i_str)
    elif selection == 'inp' and inp_img['background'].getextrema()[3][1] > 0:
        cost = calculate_cost(w, h, s, False, False)
    else:
        cost = calculate_cost(w, h, s, sm, dyn)
    return gr.Button(value=f"生成(预计消耗{cost}点数)")
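
# Main tab layout: controls on the left, preview on the right. The generate click
# first swaps the generate/stop buttons, then runs generate(), then renames and
# uploads the result, then restores the buttons; the stop button cancels the chain.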
def main_ui():
    with gr.Blocks():
        with gr.Row(variant="panel"):
            with gr.Column():
                gen_btn, stop_btn, paras, others = control_ui()
            with gr.Column():
                image, info, send_dtool = preview_ui()
        cost_list = paras[5:8] + paras[10:12] + paras[18:20] + paras[22:24]
        for component in cost_list:
            component.change(update_btn_cost, inputs=cost_list, outputs=gen_btn)
        gen = gen_btn.click(lambda: (gr.Button(visible=False), gr.Button(visible=True)), inputs=None, outputs=[gen_btn, stop_btn]).then(
            generate, paras, [image, info], concurrency_limit=1, concurrency_id="generate").then(
            rename_save_img, inputs=[image, info, others[0]], outputs=image, trigger_mode="once").then(
            lambda: (gr.Button(visible=True), gr.Button(visible=False)), inputs=None, outputs=[gen_btn, stop_btn])
        others[2].click(lambda o, s: o if len(s) == 0 else s['parameters']['seed'], inputs=[paras[3], info], outputs=paras[3])
        others[3].click(lambda i: i, inputs=image, outputs=paras[18])
        others[4].click(lambda i: gr.ImageEditor(value=i), inputs=image, outputs=paras[22])
        stop_btn.click(lambda: (gr.Button(visible=True), gr.Button(visible=False)), inputs=None, outputs=[gen_btn, stop_btn], cancels=[gen])
    return image, paras, others, send_dtool
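
# PNG info tab: upload an image on the left, show the parsed generation
# parameters on the right; the hidden JSON state enables the "send to txt2img"
# button once parameters have been found.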
def util_ui():
    with gr.Blocks():
        with gr.Row(equal_height=False):
            with gr.Column(variant='panel'):
                image = gr.Image(label="上传图片", image_mode="RGBA", sources=["upload"], interactive=True, type="pil")
            with gr.Column(variant='panel'):
                info = gr.HTML('')
                items = gr.JSON(value={}, visible=False)
                png2main = gr.Button('参数发送到文生图', visible=False)
        items.change(lambda i: gr.Button(visible=True) if len(i) else gr.Button(visible=False), inputs=items, outputs=png2main)
    return png2main, items, info, image
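
# Append the tagcomplete <script> tags to every served page by wrapping
# Gradio's TemplateResponse.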
def load_javascript():
    head = ''
    for f in sorted(os.listdir('./tagcomplete/javascript')):
        head += f'<script type="text/javascript" src="file=tagcomplete/javascript/{f}"></script>\n'
    share = gr.routes.templates.TemplateResponse

    def template_response(*args, **kwargs):
        res = share(*args, **kwargs)
        res.body = res.body.replace(b'</body>', f'{head}</body>'.encode("utf8"))
        res.init_headers()
        return res

    gr.routes.templates.TemplateResponse = template_response
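
# Copy a value into a component on another tab, then jump to that tab by
# clicking its tab button from JS.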
def send_and_jump(component, fn, inputs, outputs, tab, trigger):
    component.click(fn, inputs=inputs, outputs=outputs)
    component.click(fn=None,
                    js="(x) => { if (x !== null && x != 0) document.getElementById('" + tab + "-button').click();}",
                    inputs=trigger)
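
# Same as send_and_jump, but also opens the target accordion on success and can
# click a secondary inner-tab button (e.g. the img2img or inpaint tab).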
def send_jump_select(component, fn, inputs, outputs, tab, accordin, trigger, secondary=None):
    extra_js = ''
    if secondary is not None:
        extra_js = "document.getElementById('" + secondary + "-button').click();"
    component.click(fn, inputs=inputs, outputs=outputs).success(
        lambda x: gr.Accordion(open=True) if x else gr.Accordion(), inputs=trigger, outputs=accordin)
    component.click(fn=None,
                    js="(x) => { if (x !== null && x != 0) document.getElementById('" + tab + "-button').click();" + extra_js + " return null; }",
                    inputs=trigger)
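
# Top-level layout: Anlas balance and refresh at the top, one TabItem per
# feature, and the cross-tab wiring between them.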
def ui():
    load_javascript()
    set_token(os.environ.get('token'))
    with gr.Blocks(title="NAI Client", analytics_enabled=False, theme=gr.themes.Soft(), js="() => {document.body.classList.toggle('dark', false);}") as website:
        with gr.Row():
            remain_anlas = gr.Textbox(label="剩余点数", value=get_remain_anlas, scale=3)
            refresh = gr.Button("🔄")
            refresh.click(get_remain_anlas, inputs=None, outputs=remain_anlas)
        with gr.Tabs():
            with gr.TabItem("图片生成", elem_id="client_ui_main"):
                image, paras, others, send_dtool = main_ui()
            with gr.TabItem("定向修图", elem_id="client_ui_dtool"):
                from_t2i, send_i2i, send_inp, send_vib, in_image, out_image, d_index = director_ui()
            with gr.TabItem("图片信息读取"):
                png2main, png_items, info, read_image = util_ui()
            with gr.TabItem("Tagger反推"):
                tags, tagger2main = tagger_ui()
            with gr.TabItem("云端图片浏览") as tab:
                gallery, h_index, gal2main, gal_items, history2ref, history2i2i, history2inp, history2dtl = img_history_ui(tab)
            with gr.TabItem("设置"):
                switchLightDark = gr.Button(value="切换浅色/深色模式")
                switchLightDark.click(fn=None, js="() => {document.body.classList.toggle('dark');}")
                loadTagComplete = gr.Button(value="重新加载tag补全")
                loadTagComplete.click(fn=None, js="() => {document.getElementById('client_ui_main-button').click();run();}")
                clearTagCache = gr.Button(value="清除tag补全缓存")
                clearTagCache.click(fn=None, js="() => {localStorage.clear();}")
        send_and_jump(png2main, send_paras, [png_items] + paras[:15], paras[:15], "client_ui_main", read_image)
        send_and_jump(gal2main, send_paras, [gal_items] + paras[:15], paras[:15], "client_ui_main", gal_items)
        from_t2i.click(lambda x: x, inputs=image, outputs=in_image)
        send_jump_select(send_i2i, send_outputs, [out_image, d_index], paras[18], "client_ui_main", others[6], out_image, "i2i_block")
        send_jump_select(send_inp, send_outputs, [out_image, d_index], paras[22], "client_ui_main", others[6], out_image, "inp_block")
        send_jump_select(send_vib, (lambda l, i: l if i == -1 else [l[i]]), [out_image, d_index], paras[15], "client_ui_main", others[5], out_image)
        send_and_jump(send_dtool, (lambda x: x), image, in_image, "client_ui_dtool", image)
        send_jump_select(history2i2i, send_outputs, [gallery, h_index], paras[18], "client_ui_main", others[6], gal_items, "i2i_block")
        send_jump_select(history2inp, send_outputs, [gallery, h_index], paras[22], "client_ui_main", others[6], gal_items, "inp_block")
        send_jump_select(history2ref, (lambda l, i: None if i == -1 else [l[i]]), [gallery, h_index], paras[15], "client_ui_main", others[5], gal_items)
        send_and_jump(history2dtl, send_outputs, [gallery, h_index], in_image, "client_ui_dtool", gal_items)
        send_and_jump(tagger2main, (lambda x: x), tags, paras[0], "client_ui_main", tags)
        read_image.change(read_info_from_image, inputs=read_image, outputs=[info, png_items])
    return website
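
# Entry point: restore previously uploaded images from the dataset repo, then
# start the queued app behind HTTP basic auth.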
if __name__ == '__main__':
    snapshot_download(repo_id="P01yH3dr0n/naimages", repo_type="dataset", local_dir="./", token=os.environ.get("hf_token"), allow_patterns="*.png")
    website = ui()
    website.queue(default_concurrency_limit=5)
    website.launch(auth=(os.environ.get('account'), os.environ.get('password')), allowed_paths=['tagcomplete'], debug=True)