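# app.py — Gradio Space demo for a brightness (illumination) ControlNet:
# Stable Diffusion (Pastel Mix) guided by a grayscale brightness map.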
import gc

import gradio as gr
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler
from PIL import Image

# Allow TF32 matmuls on Ampere+ GPUs: faster inference at negligible precision cost.
torch.backends.cuda.matmul.allow_tf32 = True

# Base model: Pastel Mix, loaded from its original single-file checkpoint.
model_id = "andite/pastel-mix"
model_url = "https://huggingface.co/andite/pastel-mix/blob/main/pastelmix-better-vae.ckpt"
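
# Brightness/illumination ControlNet weights, loaded in fp16.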
controlnet = ControlNetModel.from_pretrained("ioclab/connow", torch_dtype=torch.float16, use_safetensors=True)

# from_ckpt loads an original Stable Diffusion .ckpt file (newer diffusers
# releases expose this as from_single_file).
pipe = StableDiffusionControlNetPipeline.from_ckpt(
    model_url,
    controlnet=controlnet,
    torch_dtype=torch.float16,
    safety_checker=None,
)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Memory optimizations. enable_model_cpu_offload() manages device placement
# itself, so the pipeline is not moved to CUDA explicitly beforehand.
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()
pipe.enable_attention_slicing()
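
# Run one ControlNet-guided generation. `ill` is the ControlNet conditioning
# scale (how strongly the brightness map steers the output); `size` caps the
# longer side of the generated image.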
def infer(
    prompt,
    negative_prompt,
    conditioning_image,
    num_inference_steps=30,
    size=768,
    guidance_scale=7.0,
    seed=1234,
    ill=0.6,
):
    # Convert the uploaded conditioning image to grayscale (brightness channel).
    conditioning_image_raw = Image.fromarray(conditioning_image)
    conditioning_image = conditioning_image_raw.convert('L')

    # Scale the longer side to `size` (the UI slider; the original hardcoded 768
    # here), keeping aspect ratio, and snap both dimensions to multiples of 8
    # as Stable Diffusion requires.
    w = conditioning_image.width
    h = conditioning_image.height
    ratio = size / max(w, h)
    w = int(w * ratio) // 8 * 8
    h = int(h * ratio) // 8 * 8

    # A seed of -1 means "random": draw a fresh seed from the generator.
    g_cpu = torch.Generator()
    if seed == -1:
        generator = g_cpu.manual_seed(g_cpu.seed())
    else:
        generator = g_cpu.manual_seed(seed)

    # Pass the grayscale map as the ControlNet conditioning image.
    output_image = pipe(
        prompt,
        conditioning_image,
        height=h,
        width=w,
        num_inference_steps=num_inference_steps,
        generator=generator,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        controlnet_conditioning_scale=ill,
    ).images[0]

    # Free the conditioning images and collect garbage to keep memory flat
    # across requests.
    del conditioning_image, conditioning_image_raw
    gc.collect()

    return output_image
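
# Sketch: infer can also be called directly, e.g.
#   result = infer("a bamboo forest, sunrise", "low quality", cond_array)
# where `cond_array` (hypothetical name) is an HxWxC uint8 NumPy array, the
# same format gr.Image hands to the callback.

# Gradio UI: prompt inputs and advanced options on the left, result on the right.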
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # ControlNet on Brightness

        This is a demo of ControlNet conditioned on image brightness.
        """)
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
            )
            conditioning_image = gr.Image(
                label="Conditioning Image",
            )
            with gr.Accordion('Advanced options', open=False):
                with gr.Row():
                    num_inference_steps = gr.Slider(
                        10, 40, 20,
                        step=1,
                        label="Steps",
                    )
                    size = gr.Slider(
                        256, 768, 512,
                        step=128,
                        label="Size",
                    )
                with gr.Row():
                    guidance_scale = gr.Slider(
                        label='Guidance Scale',
                        minimum=0.1,
                        maximum=30.0,
                        value=7.0,
                        step=0.1,
                    )
                    seed = gr.Slider(
                        label='Seed',
                        value=-1,
                        minimum=-1,
                        maximum=2147483647,
                        step=1,
                        # randomize=True
                    )
                with gr.Row():
                    ill = gr.Slider(
                        label='ControlNet Illumination Scale',
                        minimum=0,
                        maximum=1,
                        value=0.6,
                        step=0.05,
                    )
            submit_btn = gr.Button(
                value="Submit",
                variant="primary",
            )
        with gr.Column(min_width=300):
            output = gr.Image(
                label="Result",
            )
    submit_btn.click(
        fn=infer,
        inputs=[
            prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed, ill,
        ],
        outputs=output,
    )
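
    # Cached examples: each row supplies (prompt, negative_prompt,
    # conditioning_image) to match `inputs`; outputs are generated once at
    # startup by running `infer`.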
    gr.Examples(
        examples=[
            ["masterpiece, best quality, High contrast, A bamboo forest, a stream, The rising sun, colorful,", "((nsfw)), (blush), (bare), (worst quality:2, low quality:2), (zombie, sketch, interlocked fingers), greyscale", "./conditioning_images/ty1.jpg"],
            ["masterpiece, best quality, High contrast, A bamboo forest, a stream, The rising sun, colorful,", "((nsfw)), (blush), (bare), (worst quality:2, low quality:2), (zombie, sketch, interlocked fingers), greyscale", "./conditioning_images/ty2.jpg"],
        ],
        inputs=[
            prompt, negative_prompt, conditioning_image,
        ],
        outputs=output,
        fn=infer,
        cache_examples=True,
    )
    gr.Markdown(
        """
        * [Dataset](https://huggingface.co/datasets/ioclab/grayscale_image_aesthetic_3M). Note that this dataset received additional processing; preview versions of the processed data are here:
          [Anime Dataset](https://huggingface.co/datasets/ioclab/lighttestout), [Nature Dataset](https://huggingface.co/datasets/ioclab/light)
        * [Diffusers model](https://huggingface.co/ioclab/connow/tree/main), [Web UI model](https://huggingface.co/ioclab/control_v1u_sd15_illumination_webui)
        * [Training Report](https://huggingface.co/ioclab/control_v1u_sd15_illumination_webui), [Doc (Chinese)](https://aigc.ioclab.com/sd-showcase/light_controlnet.html)
        """)
demo.launch()