benjamin-paine committed
Commit 6334eed
Parent(s): a04672e
Update README.md
README.md
CHANGED
These can be added directly to any initialized UNet to inject DPO training into it. See the code below for usage (diffusers only).

```py
from __future__ import annotations

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from diffusers.models import UNet2DConditionModel

def inject_dpo(unet: UNet2DConditionModel, dpo_offset_path: str, device: str, strict: bool = False) -> None:
    """
    Injects DPO weights directly into your UNet.

    Args:
        unet (`UNet2DConditionModel`)
            The initialized UNet from your pipeline.
        dpo_offset_path (`str`)
            The path to the `.safetensors` file downloaded from https://huggingface.co/benjamin-paine/sd-dpo-offsets/.
            Make sure you're using the right file for the right base model.
        device (`str`)
            The device to load the offset tensors onto; this should match your pipeline's device.
        strict (`bool`, *optional*)
            Whether or not to raise errors when a weight cannot be applied. Defaults to False.
    """
    from safetensors import safe_open
    with safe_open(dpo_offset_path, framework="pt", device=device) as f:
        for key in f.keys():
            key_parts = key.split(".")
            current_layer = unet
            # Walk the module path (all key parts except the final parameter name) to the
            # module that owns this parameter; getattr also resolves numeric ModuleList
            # indices such as "0".
            for key_part in key_parts[:-1]:
                current_layer = getattr(current_layer, key_part, None)
                if current_layer is None:
                    break
            layer_param = getattr(current_layer, key_parts[-1], None)
            if layer_param is None:
                if strict:
                    raise IOError(f"Couldn't get weight parameter for key {key}")
                continue
            # Apply the offset in place to the existing parameter.
            layer_param.data += f.get_tensor(key)
```
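
Because the offsets are applied additively and in place, the same traversal can also remove them again or blend them in at reduced strength. The helper below is a hedged sketch, not part of this repository: `apply_dpo_offsets` and its `scale` parameter are hypothetical, with `scale=1.0` matching `inject_dpo`, `scale=0.5` applying half-strength offsets, and `scale=-1.0` undoing a previous full-strength injection.

```py
import torch
from safetensors import safe_open

def apply_dpo_offsets(
    unet: torch.nn.Module,
    dpo_offset_path: str,
    device: str,
    scale: float = 1.0,
    strict: bool = False,
) -> None:
    """
    Hypothetical variant of `inject_dpo` that scales each offset before adding it.
    scale=1.0 is equivalent to inject_dpo; scale=-1.0 subtracts a previous injection.
    """
    with safe_open(dpo_offset_path, framework="pt", device=device) as f:
        for key in f.keys():
            key_parts = key.split(".")
            current_layer = unet
            # Walk the module path; getattr also resolves numeric ModuleList indices like "0".
            for key_part in key_parts[:-1]:
                current_layer = getattr(current_layer, key_part, None)
                if current_layer is None:
                    break
            layer_param = getattr(current_layer, key_parts[-1], None)
            if layer_param is None:
                if strict:
                    raise IOError(f"Couldn't get weight parameter for key {key}")
                continue
            # Scale the offset, then add it to the existing parameter in place.
            layer_param.data += scale * f.get_tensor(key)
```

Note that nothing records which offsets have already been applied, so keep track of the scales you add yourself.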

Now you can use this function like so:

```py
from diffusers import StableDiffusionPipeline
import huggingface_hub
import torch

# load sdv15 pipeline
device = "cuda"
model_id = "Lykon/dreamshaper-8"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe.to(device)

# make image
prompt = "Two cats playing chess on a tree branch"
generator = torch.Generator(device=device)
generator.manual_seed(123456789)
image = pipe(prompt, guidance_scale=7.5, generator=generator).images[0]
image.save("cats_playing_chess.png")

# download DPO offsets
dpo_offset_path = huggingface_hub.hf_hub_download("benjamin-paine/sd-dpo-offsets", "sd_v15_unet_dpo_offset.safetensors")
# inject
inject_dpo(pipe.unet, dpo_offset_path, device)

# make image again
generator.manual_seed(123456789)
image = pipe(prompt, guidance_scale=7.5, generator=generator).images[0]
image.save("cats_playing_chess_dpo.png")
```

`cats_playing_chess.png`
![image/png](https://cdn-uploads.huggingface.co/production/uploads/64429aaf7feb866811b12f73/KqAohfKMXKVGTDpuBhhx6.png)

`cats_playing_chess_dpo.png`
![image/png](https://cdn-uploads.huggingface.co/production/uploads/64429aaf7feb866811b12f73/fY9j1q8ZazyNP4JbD0TTU.png)

Or for XL:

```py
from diffusers import StableDiffusionXLPipeline
import huggingface_hub
import torch

# load sdxl pipeline
device = "cuda"
model_id = "Lykon/dreamshaper-xl-1-0"
pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe.to(device)

# make image
prompt = "Two cats playing chess on a tree branch"
generator = torch.Generator(device=device)
generator.manual_seed(123456789)
image = pipe(prompt, guidance_scale=7.5, generator=generator).images[0]
image.save("cats_playing_chess_xl.png")

# download DPO offsets (use the XL offsets file for an XL base model; confirm the exact filename in the repository)
dpo_offset_path = huggingface_hub.hf_hub_download("benjamin-paine/sd-dpo-offsets", "sd_xl_unet_dpo_offset.safetensors")
# inject
inject_dpo(pipe.unet, dpo_offset_path, device)

# make image again
generator.manual_seed(123456789)
image = pipe(prompt, guidance_scale=7.5, generator=generator).images[0]
image.save("cats_playing_chess_xl_dpo.png")
```

`cats_playing_chess_xl.png`
![image/png](https://cdn-uploads.huggingface.co/production/uploads/64429aaf7feb866811b12f73/BufmVzFBsoYX_jipzErIo.png)

`cats_playing_chess_xl_dpo.png`
![image/png](https://cdn-uploads.huggingface.co/production/uploads/64429aaf7feb866811b12f73/Rj9FXI-vmrMwvepMSLMe7.png)
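
If you want to A/B test without rebuilding the pipeline, the hypothetical `apply_dpo_offsets` helper sketched earlier can subtract the same file to restore the original weights and then re-apply it at whatever strength you like:

```py
# Assumes `pipe`, `dpo_offset_path`, `device`, `prompt`, and `generator` from the XL example above,
# plus the hypothetical apply_dpo_offsets() sketch from earlier in this README.
apply_dpo_offsets(pipe.unet, dpo_offset_path, device, scale=-1.0)  # undo the full-strength injection
apply_dpo_offsets(pipe.unet, dpo_offset_path, device, scale=0.5)   # try a milder DPO effect

generator.manual_seed(123456789)
image = pipe(prompt, guidance_scale=7.5, generator=generator).images[0]
image.save("cats_playing_chess_xl_dpo_half.png")
```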