Francesco Lattari committed
Commit d1e6b43 • 1 Parent(s): 81de2ec

cloned model

Browse files:
- README.md +106 -0
- config.json +57 -0
- diffusion_pytorch_model.fp16.bin +3 -0
- diffusion_pytorch_model.fp16.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,106 @@
---
license: openrail++
base_model: stabilityai/stable-diffusion-xl-base-1.0
tags:
- stable-diffusion-xl
- stable-diffusion-xl-diffusers
- text-to-image
- diffusers
inference: false
---

# SDXL-controlnet: Canny

These are controlnet weights trained on [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with canny conditioning. You can find some example images below.

prompt: a couple watching a romantic sunset, 4k photo
![images_0](./out_couple.png)

prompt: ultrarealistic shot of a furry blue bird
![images_1](./out_bird.png)

prompt: a woman, close up, detailed, beautiful, street photography, photorealistic, detailed, Kodak ektar 100, natural, candid shot
![images_2](./out_women.png)

prompt: Cinematic, neoclassical table in the living room, cinematic, contour, lighting, highly detailed, winter, golden hour
![images_3](./out_room.png)

prompt: a tornado hitting grass field, 1980's film grain. overcast, muted colors.
![images_4](./out_tornado.png)

## Usage

Make sure to first install the libraries:

```bash
pip install accelerate transformers safetensors opencv-python diffusers
```

And then we're ready to go:

```python
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL
from diffusers.utils import load_image
from PIL import Image
import torch
import numpy as np
import cv2

prompt = "aerial view, a futuristic research complex in a bright foggy jungle, hard lighting"
negative_prompt = "low quality, bad quality, sketches"

image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/hf-logo.png")

controlnet_conditioning_scale = 0.5  # recommended for good generalization

controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",
    torch_dtype=torch.float16
)
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    controlnet=controlnet,
    vae=vae,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()

# Extract canny edges and stack them into a 3-channel conditioning image
image = np.array(image)
image = cv2.Canny(image, 100, 200)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
image = Image.fromarray(image)

images = pipe(
    prompt,
    negative_prompt=negative_prompt,
    image=image,
    controlnet_conditioning_scale=controlnet_conditioning_scale,
).images

images[0].save("hug_lab.png")
```

![images_10](./out_hug_lab_7.png)

For more details, check out the official documentation of [`StableDiffusionXLControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/controlnet_sdxl).
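Since this repository also ships half-precision weight files (see `diffusion_pytorch_model.fp16.safetensors` below), you can keep the download smaller by requesting that variant explicitly. A minimal sketch, assuming the standard `variant` and `use_safetensors` arguments of `from_pretrained` in `diffusers`:

```python
import torch
from diffusers import ControlNetModel

# Load the fp16 variant shipped in this repo instead of the full-precision
# weights; variant="fp16" selects diffusion_pytorch_model.fp16.safetensors.
controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0",
    variant="fp16",
    use_safetensors=True,  # prefer the .safetensors file over the .bin
    torch_dtype=torch.float16,
)
```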

### Training

Our training script was built on top of the official training script that we provide [here](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/README_sdxl.md).

#### Training data
This checkpoint was first trained for 20,000 steps on laion 6a, resized so that the smaller image dimension is at most 384 pixels. It was then trained for a further 20,000 steps on laion 6a, resized so that the smaller image dimension is at most 1024 pixels and filtered to contain only images whose smaller dimension is at least 1024 pixels. We found that this further high-resolution fine-tuning was necessary for good image quality.
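To make the two-stage preprocessing concrete, here is a minimal sketch of the resizing and filtering described above; the helper names and the use of PIL are illustrative assumptions, not the actual training code:

```python
from PIL import Image

def resize_to_max_min_dim(img: Image.Image, max_min_dim: int) -> Image.Image:
    """Downscale so the smaller side is at most `max_min_dim` pixels,
    preserving aspect ratio (e.g. 384 in stage one, 1024 in stage two)."""
    w, h = img.size
    smaller = min(w, h)
    if smaller <= max_min_dim:
        return img
    scale = max_min_dim / smaller
    return img.resize((round(w * scale), round(h * scale)), Image.LANCZOS)

def passes_min_dim_filter(img: Image.Image, min_dim: int = 1024) -> bool:
    """Stage-two filter: keep only images whose smaller side is >= `min_dim`."""
    return min(img.size) >= min_dim
```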

#### Compute
One 8×A100 machine.

#### Batch size
Data parallel with a per-GPU batch size of 8, for a total batch size of 64.

#### Hyper Parameters
Constant learning rate of 1e-4, scaled by the total batch size to an effective learning rate of 64e-4.
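As a sanity check on these numbers, the effective settings work out as follows (a trivial sketch; the variable names are illustrative):

```python
per_gpu_batch_size = 8
num_gpus = 8  # one 8xA100 machine, data parallel
total_batch_size = per_gpu_batch_size * num_gpus  # 64

base_lr = 1e-4
scaled_lr = base_lr * total_batch_size  # 64e-4 = 6.4e-3
```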

#### Mixed precision
fp16
config.json
ADDED
@@ -0,0 +1,57 @@
{
  "_class_name": "ControlNetModel",
  "_diffusers_version": "0.20.0.dev0",
  "_name_or_path": "../controlnet-1-0-canny/checkpoint-20000/controlnet",
  "act_fn": "silu",
  "addition_embed_type": "text_time",
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": 256,
  "attention_head_dim": [
    5,
    10,
    20
  ],
  "block_out_channels": [
    320,
    640,
    1280
  ],
  "class_embed_type": null,
  "conditioning_channels": 3,
  "conditioning_embedding_out_channels": [
    16,
    32,
    96,
    256
  ],
  "controlnet_conditioning_channel_order": "rgb",
  "cross_attention_dim": 2048,
  "down_block_types": [
    "DownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D"
  ],
  "downsample_padding": 1,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "global_pool_conditions": false,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "projection_class_embeddings_input_dim": 2816,
  "resnet_time_scale_shift": "default",
  "transformer_layers_per_block": [
    1,
    2,
    10
  ],
  "upcast_attention": null,
  "use_linear_projection": true
}
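If you would rather inspect this configuration programmatically than read the raw JSON, it can be fetched without downloading the weights; a minimal sketch, assuming the repo id from the usage example above and the `load_config` method that `diffusers` models inherit from `ConfigMixin`:

```python
from diffusers import ControlNetModel

# Fetch only config.json, not the multi-GB weight files.
config = ControlNetModel.load_config("diffusers/controlnet-canny-sdxl-1.0")
print(config["cross_attention_dim"])           # 2048, matching SDXL's text-embedding width
print(config["block_out_channels"])            # [320, 640, 1280]
print(config["transformer_layers_per_block"])  # [1, 2, 10]
```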
diffusion_pytorch_model.fp16.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a42da57d6e2fd6ec786ccfea1cf1a06d2c1d91b2d8a14c7de3a67553b10b2948
size 2502401039
diffusion_pytorch_model.fp16.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:702c15197c89a18e8bd5eaff5ea61793ce35964853c23953a5e3a0c96820cf52
size 2256535552
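Both weight files are stored with Git LFS, so the entries above are pointer files: the actual payload is addressed by its sha256 `oid` and `size`. Below is a minimal sketch of downloading the fp16 safetensors file and verifying it against the oid recorded above, assuming `huggingface_hub` is installed and the repo id from the usage example:

```python
import hashlib
from huggingface_hub import hf_hub_download

# Download the fp16 safetensors weights (resolved through LFS automatically).
path = hf_hub_download(
    repo_id="diffusers/controlnet-canny-sdxl-1.0",
    filename="diffusion_pytorch_model.fp16.safetensors",
)

# Verify the file against the sha256 oid from the LFS pointer above.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha256.update(chunk)

expected = "702c15197c89a18e8bd5eaff5ea61793ce35964853c23953a5e3a0c96820cf52"
assert sha256.hexdigest() == expected, "checksum mismatch"
```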