Commit ceb8e9e • 0 Parent(s)
Initial commit

Files changed:
- .gitattributes +28 -0
- README.md +45 -0
- config.json +1 -0
- diffusion_model.pt +3 -0
- model_index.json +12 -0
- modeling_ddpm.py +61 -0
- scheduler_config.json +8 -0
.gitattributes
ADDED
@@ -0,0 +1,28 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+diffusion_model.pt filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,45 @@
+---
+tags:
+- ddpm_diffusion
+---
+
+# Denoising Diffusion Probabilistic Models (DDPM)
+
+**Paper**: [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239)
+
+**Abstract**:
+
+*We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN.*
+
+## Usage
+
+```python
+# !pip install diffusers
+from diffusers import DiffusionPipeline
+import PIL.Image
+import numpy as np
+
+model_id = "fusing/ddpm-lsun-bedroom-ema"
+
+# load model and scheduler
+ddpm = DiffusionPipeline.from_pretrained(model_id)
+
+# run pipeline in inference (sample random noise and denoise)
+image = ddpm()
+
+# process image to PIL
+image_processed = image.cpu().permute(0, 2, 3, 1)
+image_processed = (image_processed + 1.0) * 127.5
+image_processed = image_processed.numpy().astype(np.uint8)
+image_pil = PIL.Image.fromarray(image_processed[0])
+
+# save image
+image_pil.save("test.png")
+```
+
+## Samples
+
+1. ![sample_1](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/hf/ddpm-lsun-bedroom-ema/image_0.png)
+2. ![sample_2](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/hf/ddpm-lsun-bedroom-ema/image_1.png)
+3. ![sample_3](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/hf/ddpm-lsun-bedroom-ema/image_2.png)
+4. ![sample_4](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/hf/ddpm-lsun-bedroom-ema/image_3.png)
config.json
ADDED
@@ -0,0 +1 @@
+{"_class_name": "UNetModel", "attn_resolutions": [16], "down_blocks": ["UNetResDownBlock2D", "UNetResDownBlock2D", "UNetResDownBlock2D", "UNetResDownBlock2D", "UNetResAttnDownBlock2D", "UNetResDownBlock2D"], "up_blocks": ["UNetResUpBlock2D", "UNetResAttnUpBlock2D", "UNetResUpBlock2D", "UNetResUpBlock2D", "UNetResUpBlock2D", "UNetResUpBlock2D"], "conv_resample": true, "downsample_padding": 0, "num_head_channels": null, "ch": 128, "ch_mult": [1, 1, 2, 2, 4, 4], "block_channels": [128, 128, 256, 256, 512, 512], "resnet_eps": 1e-06, "flip_sin_to_cos": false, "downscale_freq_shift": 1, "dropout": 0.0, "in_channels": 3, "name_or_path": "./ddpm-lsun-church/", "num_res_blocks": 2, "out_ch": 3, "out_channels": 3, "resamp_with_conv": true, "resolution": 256, "image_size": 256}
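For orientation, the derived fields in this config follow from the base fields: `block_channels` is the base width `ch` scaled per level by `ch_mult`, and attention is inserted where the feature map has been downsampled to one of `attn_resolutions` (16 px here, i.e. 256 halved four times, matching the single `UNetResAttnDownBlock2D`). A minimal sketch of that relationship; only the JSON keys come from the repo, the variable names are illustrative:

```python
import json

# Hedged sketch: how the derived fields of config.json relate to its base fields.
with open("config.json") as f:
    config = json.load(f)

ch, ch_mult = config["ch"], config["ch_mult"]         # 128, [1, 1, 2, 2, 4, 4]
block_channels = [ch * m for m in ch_mult]            # [128, 128, 256, 256, 512, 512]
assert block_channels == config["block_channels"]

# Attention is applied where the downsampled feature map reaches attn_resolutions:
# a 256 px input halved four times gives 16 px, hence the single attention down-block.
print("attention at resolutions:", config["attn_resolutions"])
```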
diffusion_model.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1296e7d066e7c3c973dc3f3a27c6adca3a981646ede449f127db566da9abcd5
+size 470185705
model_index.json
ADDED
@@ -0,0 +1,12 @@
+{
+  "_class_name": "DDPM",
+  "_module": "modeling_ddpm.py",
+  "scheduler": [
+    "diffusers",
+    "DDPMScheduler"
+  ],
+  "unet": [
+    "diffusers",
+    "UNetModel"
+  ]
+}
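`model_index.json` is the pipeline index: `_class_name` and `_module` name the pipeline class and the file that defines it, and each remaining entry maps a pipeline component to the `(library, class)` pair it should be loaded from. A hedged sketch that only reads the index and prints that mapping; it deliberately avoids importing the legacy `UNetModel`/`DDPMScheduler` classes, which may not exist in current `diffusers`:

```python
import json

# Inspect the pipeline index without importing anything from diffusers.
with open("model_index.json") as f:
    index = json.load(f)

pipeline_class = index.pop("_class_name")   # "DDPM", defined in modeling_ddpm.py
pipeline_module = index.pop("_module")      # "modeling_ddpm.py"

for component, (library, class_name) in index.items():
    # e.g. unet -> ("diffusers", "UNetModel"), scheduler -> ("diffusers", "DDPMScheduler")
    print(f"{component}: {class_name} from {library}")
```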
modeling_ddpm.py
ADDED
@@ -0,0 +1,61 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+
+from diffusers import DiffusionPipeline
+import tqdm
+import torch
+
+
+class DDPM(DiffusionPipeline):
+
+    modeling_file = "modeling_ddpm.py"
+
+    def __init__(self, unet, noise_scheduler):
+        super().__init__()
+        self.register_modules(unet=unet, noise_scheduler=noise_scheduler)
+
+    def __call__(self, batch_size=1, generator=None, torch_device=None):
+        if torch_device is None:
+            torch_device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        self.unet.to(torch_device)
+        # 1. Sample gaussian noise
+        image = self.noise_scheduler.sample_noise((batch_size, self.unet.in_channels, self.unet.resolution, self.unet.resolution), device=torch_device, generator=generator)
+        for t in tqdm.tqdm(reversed(range(len(self.noise_scheduler))), total=len(self.noise_scheduler)):
+            # i) define coefficients for time step t
+            clip_image_coeff = 1 / torch.sqrt(self.noise_scheduler.get_alpha_prod(t))
+            clip_noise_coeff = torch.sqrt(1 / self.noise_scheduler.get_alpha_prod(t) - 1)
+            image_coeff = (1 - self.noise_scheduler.get_alpha_prod(t - 1)) * torch.sqrt(self.noise_scheduler.get_alpha(t)) / (1 - self.noise_scheduler.get_alpha_prod(t))
+            clip_coeff = torch.sqrt(self.noise_scheduler.get_alpha_prod(t - 1)) * self.noise_scheduler.get_beta(t) / (1 - self.noise_scheduler.get_alpha_prod(t))
+
+            # ii) predict noise residual
+            with torch.no_grad():
+                noise_residual = self.unet(image, t)
+
+            # iii) compute predicted image from residual
+            # See 2nd formula at https://github.com/hojonathanho/diffusion/issues/5#issue-896554416 for comparison
+            pred_mean = clip_image_coeff * image - clip_noise_coeff * noise_residual
+            pred_mean = torch.clamp(pred_mean, -1, 1)
+            prev_image = clip_coeff * pred_mean + image_coeff * image
+
+            # iv) sample variance
+            prev_variance = self.noise_scheduler.sample_variance(t, prev_image.shape, device=torch_device, generator=generator)
+
+            # v) sample x_{t-1} ~ N(prev_image, prev_variance)
+            sampled_prev_image = prev_image + prev_variance
+            image = sampled_prev_image
+
+        return image
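The coefficients in step i) are the standard DDPM quantities: `clip_image_coeff` and `clip_noise_coeff` recover the clipped estimate `x0_hat = x_t / sqrt(alpha_bar_t) - sqrt(1/alpha_bar_t - 1) * eps_theta`, while `clip_coeff` and `image_coeff` are the weights of `x0_hat` and `x_t` in the posterior mean (Eq. 7 of the DDPM paper). Below is a self-contained sketch that recomputes these values from the linear schedule in `scheduler_config.json`; the `get_alpha`/`get_alpha_prod`/`get_beta` accessors belong to this repo's legacy scheduler, so plain tensors are used instead:

```python
import torch

# Self-contained sketch (not the repo's scheduler): recompute the step-t coefficients
# used above from the linear schedule in scheduler_config.json.
betas = torch.linspace(0.0001, 0.02, 1000)       # beta_t
alphas = 1.0 - betas                             # alpha_t
alphas_prod = torch.cumprod(alphas, dim=0)       # alpha_bar_t

def ddpm_coefficients(t: int):
    alpha_prod_t = alphas_prod[t]
    alpha_prod_prev = alphas_prod[t - 1] if t > 0 else torch.tensor(1.0)

    # x0_hat = x_t / sqrt(alpha_bar_t) - sqrt(1 / alpha_bar_t - 1) * eps_theta
    clip_image_coeff = 1.0 / torch.sqrt(alpha_prod_t)
    clip_noise_coeff = torch.sqrt(1.0 / alpha_prod_t - 1.0)

    # posterior mean mu(x_t, x0_hat): weights on x0_hat and on x_t (Eq. 7, DDPM paper)
    clip_coeff = torch.sqrt(alpha_prod_prev) * betas[t] / (1.0 - alpha_prod_t)
    image_coeff = (1.0 - alpha_prod_prev) * torch.sqrt(alphas[t]) / (1.0 - alpha_prod_t)
    return clip_image_coeff, clip_noise_coeff, clip_coeff, image_coeff

print(ddpm_coefficients(999))
```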
scheduler_config.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "_class_name": "GaussianDDPMScheduler",
+  "beta_end": 0.02,
+  "beta_schedule": "linear",
+  "beta_start": 0.0001,
+  "timesteps": 1000,
+  "variance_type": "fixed_small"
+}
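These fields describe a linear beta schedule from `beta_start` to `beta_end` over 1000 timesteps, and `variance_type: "fixed_small"` corresponds to the DDPM paper's smaller posterior variance `sigma_t^2 = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t`, which is what `sample_variance` draws from in the pipeline above. A hedged sketch of the schedule these values imply (the actual `GaussianDDPMScheduler` implementation may differ in detail):

```python
import torch

# Hedged sketch of the schedule described by scheduler_config.json.
beta_start, beta_end, timesteps = 0.0001, 0.02, 1000

betas = torch.linspace(beta_start, beta_end, timesteps)          # "linear" beta_schedule
alphas_prod = torch.cumprod(1.0 - betas, dim=0)                  # alpha_bar_t
alphas_prod_prev = torch.cat([torch.ones(1), alphas_prod[:-1]])  # alpha_bar_{t-1}

# variance_type "fixed_small": sigma_t^2 = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
posterior_variance = (1.0 - alphas_prod_prev) / (1.0 - alphas_prod) * betas

print(posterior_variance[1].item(), posterior_variance[-1].item())
```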