import spaces
import gradio as gr
import os
import numpy as np
import random
from huggingface_hub import login, ModelCard
import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL
from blora_utils import BLOCKS, filter_lora, scale_lora
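# Helpers for B-LoRA: BLOCKS maps 'content' / 'style' to UNet block-name prefixes,
# filter_lora keeps only the LoRA keys under those prefixes, and scale_lora
# multiplies the kept weights by an alpha.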
# True when running on the shared demo Space (shows the "duplicate this Space" notice below).
is_shared_ui = "fffiloni/B-LoRa-Inference" in os.environ.get("SPACE_ID", "")
hf_token = os.environ.get("YOUR_HF_TOKEN_WITH_READ_PERMISSION")
if hf_token:
    # A read token is only needed to fetch private B-LoRA repos.
    login(token=hf_token)
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
SAMPLE_MODEL_IDS = [
'lora-library/B-LoRA-teddybear',
'lora-library/B-LoRA-bull',
'lora-library/B-LoRA-wolf_plushie',
'lora-library/B-LoRA-pen_sketch',
'lora-library/B-LoRA-cartoon_line',
'lora-library/B-LoRA-child',
'lora-library/B-LoRA-vase',
'lora-library/B-LoRA-scary_mug',
'lora-library/B-LoRA-statue',
'lora-library/B-LoRA-colorful_teapot',
'lora-library/B-LoRA-grey_sloth_plushie',
'lora-library/B-LoRA-teapot',
'lora-library/B-LoRA-backpack_dog',
'lora-library/B-LoRA-buddha',
'lora-library/B-LoRA-dog6',
'lora-library/B-LoRA-poop_emoji',
'lora-library/B-LoRA-pot',
'lora-library/B-LoRA-fat_bird',
'lora-library/B-LoRA-elephant',
'lora-library/B-LoRA-metal_bird',
'lora-library/B-LoRA-cat',
'lora-library/B-LoRA-dog2',
'lora-library/B-LoRA-drawing1',
'lora-library/B-LoRA-village_oil',
'lora-library/B-LoRA-watercolor',
'lora-library/B-LoRA-house_3d',
'lora-library/B-LoRA-ink_sketch',
'lora-library/B-LoRA-drawing3',
'lora-library/B-LoRA-crayon_drawing',
'lora-library/B-LoRA-kiss',
'lora-library/B-LoRA-drawing4',
'lora-library/B-LoRA-working_cartoon',
'lora-library/B-LoRA-painting',
    'lora-library/B-LoRA-drawing2',
    'lora-library/B-LoRA-multi-dog2',
]
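# madebyollin's fp16-fix VAE avoids the NaN / black-image artifacts the stock SDXL
# VAE can produce in float16.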
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipeline = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
vae=vae,
torch_dtype=torch.float16,
).to("cuda")
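# B-LoRA (Frenkel et al.) observes that distinct SDXL attention blocks capture
# content vs. style, so the content blocks of one trained LoRA can be merged with
# the style blocks of another and loaded into the UNet as a single LoRA.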
def load_b_lora_to_unet(pipe, content_lora_model_id: str = '', style_lora_model_id: str = '',
                        content_alpha: float = 1.0, style_alpha: float = 1.0) -> None:
try:
# Get Content B-LoRA SD
if content_lora_model_id:
content_B_LoRA_sd, _ = pipe.lora_state_dict(content_lora_model_id, use_auth_token=True)
content_B_LoRA = filter_lora(content_B_LoRA_sd, BLOCKS['content'])
content_B_LoRA = scale_lora(content_B_LoRA, content_alpha)
else:
content_B_LoRA = {}
# Get Style B-LoRA SD
if style_lora_model_id:
style_B_LoRA_sd, _ = pipe.lora_state_dict(style_lora_model_id, use_auth_token=True)
style_B_LoRA = filter_lora(style_B_LoRA_sd, BLOCKS['style'])
style_B_LoRA = scale_lora(style_B_LoRA, style_alpha)
else:
style_B_LoRA = {}
# Merge B-LoRAs SD
res_lora = {**content_B_LoRA, **style_B_LoRA}
# Load
pipe.load_lora_into_unet(res_lora, None, pipe.unet)
    except Exception as e:
        # Chain the original exception so its traceback is preserved.
        raise type(e)(f'failed to load_b_lora_to_unet, due to: {e}') from e
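# For example, to render one sample subject in another sample's style
# (both ids appear in SAMPLE_MODEL_IDS above):
#   load_b_lora_to_unet(pipeline,
#                       content_lora_model_id='lora-library/B-LoRA-dog6',
#                       style_lora_model_id='lora-library/B-LoRA-pen_sketch',
#                       content_alpha=1.0, style_alpha=1.1)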
def load_b_loras(content_b_lora, style_b_lora):
pipeline.unload_lora_weights()
    if content_b_lora:
# Get instance_prompt a.k.a trigger word
content_model_card = ModelCard.load(content_b_lora)
content_model_repo_data = content_model_card.data.to_dict()
content_model_instance_prompt = content_model_repo_data.get("instance_prompt")
else:
content_model_instance_prompt = ''
    if style_b_lora:
# Get instance_prompt a.k.a trigger word
style_model_card = ModelCard.load(style_b_lora)
style_model_repo_data = style_model_card.data.to_dict()
style_model_instance_prompt = style_model_repo_data.get("instance_prompt")
style_model_instance_prompt = f"in {style_model_instance_prompt} style"
else:
style_model_instance_prompt = ''
    prepared_prompt = f"{content_model_instance_prompt} {style_model_instance_prompt}".strip()
return prepared_prompt
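# Illustration (trigger words are read from each model card, so these are made up):
# a content card with instance_prompt "a [v0] dog" plus a style card with
# instance_prompt "[v1] pen sketch" pre-fills the prompt box with
# "a [v0] dog in [v1] pen sketch style".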
@spaces.GPU()
def main(content_b_lora, style_b_lora, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator().manual_seed(seed)
    # An empty string tells load_b_lora_to_unet to skip that side.
    content_B_LoRA_path = content_b_lora or ''
    style_B_LoRA_path = style_b_lora or ''
    content_alpha, style_alpha = 1.0, 1.1
    load_b_lora_to_unet(pipeline, content_B_LoRA_path, style_B_LoRA_path, content_alpha, style_alpha)
    image = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        generator=generator,
        num_images_per_prompt=1,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
    ).images[0]
return image, seed
css = """
#col-container {
margin: 0 auto;
max-width: 720px;
}
div#warning-duplicate {
background-color: #ebf5ff;
padding: 0 16px 16px;
margin: 20px 0;
}
div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
color: #0f4592!important;
}
div#warning-duplicate strong {
color: #0f4592;
}
p.actions {
display: flex;
align-items: center;
margin: 20px 0;
}
div#warning-duplicate .actions a {
display: inline-block;
margin-right: 10px;
}
.custom-color {
color: #030303 !important;
}
"""
power_device = "GPU" if torch.cuda.is_available() else "CPU"
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
if is_shared_ui:
top_description = gr.HTML(f'''
<div class="gr-prose">
<h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
Note: you might want to use a private custom B-LoRa model</h2>
<p class="main-message custom-color">
                To do so, <strong>duplicate the Space</strong> and run it on your own profile using <strong>your own access token</strong> and, optionally, a GPU (T4-small or A10G-small) for faster inference without waiting in the queue.<br />
</p>
<p class="actions custom-color">
<a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
<img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
</a>
to start using private models and skip the queue
</p>
</div>
''', elem_id="warning-duplicate")
        gr.Markdown(f"""
        # B-LoRA Inference
        Currently running on {power_device}.
        """)
with gr.Row():
content_b_lora = gr.Dropdown(
label="B-LoRa for content",
allow_custom_value=True,
choices=SAMPLE_MODEL_IDS
)
style_b_lora = gr.Dropdown(
label="B-LoRa for style",
allow_custom_value=True,
choices=SAMPLE_MODEL_IDS
)
with gr.Column():
load_b_loras_btn = gr.Button("load models")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
run_button = gr.Button("Run", scale=0)
result = gr.Image(label="Result", show_label=False, format="png")
with gr.Accordion("Advanced Settings", open=False):
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
visible=False,
)
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
width = gr.Slider(
label="Width",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=1024,
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=MAX_IMAGE_SIZE,
step=32,
value=1024,
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance scale",
minimum=0.0,
maximum=10.0,
step=0.1,
value=0.0,
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=50,
step=1,
value=50,
)
load_b_loras_btn.click(
fn = load_b_loras,
inputs = [content_b_lora, style_b_lora],
outputs = [prompt]
)
run_button.click(
fn = main,
inputs = [content_b_lora, style_b_lora, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
outputs = [result, seed]
)
demo.queue().launch()