birgermoell committed
Commit • 7258d24 • Parent(s): 8bde354
Create train_custom_model.py

train_custom_model.py • ADDED • +1143 −0
@@ -0,0 +1,1143 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import logging
import math
import os
import random
import time
from pathlib import Path

import jax
import jax.numpy as jnp
import numpy as np
import optax
import torch
import torch.utils.checkpoint
import transformers
from datasets import load_dataset, load_from_disk
from flax import jax_utils
from flax.core.frozen_dict import unfreeze
from flax.training import train_state
from flax.training.common_utils import shard
from huggingface_hub import create_repo, upload_folder
from PIL import Image, PngImagePlugin
from torch.utils.data import IterableDataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTokenizer, FlaxCLIPTextModel, set_seed

from diffusers import (
    FlaxAutoencoderKL,
    FlaxControlNetModel,
    FlaxDDPMScheduler,
    FlaxStableDiffusionControlNetPipeline,
    FlaxUNet2DConditionModel,
)
from diffusers.utils import check_min_version, is_wandb_available


# To prevent an error that occurs when there are abnormally large compressed data chunks in the png image
# see more https://github.com/python-pillow/Pillow/issues/5610
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)

if is_wandb_available():
    import wandb

# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.16.0.dev0")

logger = logging.getLogger(__name__)


def image_grid(imgs, rows, cols):
    assert len(imgs) == rows * cols

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def log_validation(controlnet, controlnet_params, tokenizer, args, rng, weight_dtype):
    logger.info("Running validation...")

    pipeline, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        tokenizer=tokenizer,
        controlnet=controlnet,
        safety_checker=None,
        dtype=weight_dtype,
        revision=args.revision,
        from_pt=args.from_pt,
    )
    params = jax_utils.replicate(params)
    params["controlnet"] = controlnet_params

    num_samples = jax.device_count()
    prng_seed = jax.random.split(rng, jax.device_count())

    if len(args.validation_image) == len(args.validation_prompt):
        validation_images = args.validation_image
        validation_prompts = args.validation_prompt
    elif len(args.validation_image) == 1:
        validation_images = args.validation_image * len(args.validation_prompt)
        validation_prompts = args.validation_prompt
    elif len(args.validation_prompt) == 1:
        validation_images = args.validation_image
        validation_prompts = args.validation_prompt * len(args.validation_image)
    else:
        raise ValueError(
            "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`"
        )

    image_logs = []

    for validation_prompt, validation_image in zip(validation_prompts, validation_images):
        prompts = num_samples * [validation_prompt]
        prompt_ids = pipeline.prepare_text_inputs(prompts)
        prompt_ids = shard(prompt_ids)

        validation_image = Image.open(validation_image).convert("RGB")
        processed_image = pipeline.prepare_image_inputs(num_samples * [validation_image])
        processed_image = shard(processed_image)
        images = pipeline(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=params,
            prng_seed=prng_seed,
            num_inference_steps=50,
            jit=True,
        ).images

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        images = pipeline.numpy_to_pil(images)

        image_logs.append(
            {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}
        )

    if args.report_to == "wandb":
        formatted_images = []
        for log in image_logs:
            images = log["images"]
            validation_prompt = log["validation_prompt"]
            validation_image = log["validation_image"]

            formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning"))
            for image in images:
                image = wandb.Image(image, caption=validation_prompt)
                formatted_images.append(image)

        wandb.log({"validation": formatted_images})
    else:
        logger.warning(f"image logging not implemented for {args.report_to}")

    return image_logs


def save_model_card(repo_id: str, image_logs=None, base_model: str = "", repo_folder=None):
    img_str = ""
    if image_logs is not None:
        for i, log in enumerate(image_logs):
            images = log["images"]
            validation_prompt = log["validation_prompt"]
            validation_image = log["validation_image"]
            validation_image.save(os.path.join(repo_folder, "image_control.png"))
            img_str += f"prompt: {validation_prompt}\n"
            images = [validation_image] + images
            image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png"))
            img_str += f"![images_{i}](./images_{i}.png)\n"

    yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- controlnet
- jax-diffusers-event
inference: true
---
    """
    model_card = f"""
# controlnet-{repo_id}

These are ControlNet weights trained on {base_model} with a new type of conditioning. You can find some example images below.\n
{img_str}
"""
    with open(os.path.join(repo_folder, "README.md"), "w") as f:
        f.write(yaml + model_card)


def parse_args():
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
    parser.add_argument(
        "--pretrained_model_name_or_path",
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--controlnet_model_name_or_path",
        type=str,
        default=None,
        help="Path to pretrained controlnet model or model identifier from huggingface.co/models."
        " If not specified controlnet weights are initialized from unet.",
    )
    parser.add_argument(
        "--revision",
        type=str,
        default=None,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--from_pt",
        action="store_true",
        help="Load the pretrained model from a PyTorch checkpoint.",
    )
    parser.add_argument(
        "--controlnet_revision",
        type=str,
        default=None,
        help="Revision of controlnet model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--profile_steps",
        type=int,
        default=0,
        help="How many training steps to profile in the beginning.",
    )
    parser.add_argument(
        "--profile_validation",
        action="store_true",
        help="Whether to profile the (last) validation.",
    )
    parser.add_argument(
        "--profile_memory",
        action="store_true",
        help="Whether to dump an initial (before training loop) and a final (at program end) memory profile.",
    )
    parser.add_argument(
        "--ccache",
        type=str,
        default=None,
        help="Enables compilation cache.",
    )
    parser.add_argument(
        "--controlnet_from_pt",
        action="store_true",
        help="Load the controlnet model from a PyTorch checkpoint.",
    )
    parser.add_argument(
        "--tokenizer_name",
        type=str,
        default=None,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="runs/{timestamp}",
        help="The output directory where the model predictions and checkpoints will be written. "
        "Can contain placeholders: {timestamp}.",
    )
    parser.add_argument(
        "--cache_dir",
        type=str,
        default=None,
        help="The directory where the downloaded models and datasets will be stored.",
    )
    parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.")
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
        help=(
            "The resolution for input images, all the images in the train/validation dataset will be resized to this"
            " resolution"
        ),
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=1, help="Batch size (per device) for the training dataloader."
    )
    parser.add_argument("--num_train_epochs", type=int, default=100)
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform.",
    )
    parser.add_argument(
        "--checkpointing_steps",
        type=int,
        default=5000,
        help=("Save a checkpoint of the training state every X updates."),
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--scale_lr",
        action="store_true",
        help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
    )
    parser.add_argument(
        "--lr_scheduler",
        type=str,
        default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
    )
    parser.add_argument(
        "--snr_gamma",
        type=float,
        default=None,
        help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
        "More details here: https://arxiv.org/abs/2303.09556.",
    )
    parser.add_argument(
        "--dataloader_num_workers",
        type=int,
        default=0,
        help=(
            "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
        ),
    )
    parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
    parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
    parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
    parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--hub_model_id",
        type=str,
        default=None,
        help="The name of the repository to keep in sync with the local `output_dir`.",
    )
    parser.add_argument(
        "--logging_steps",
        type=int,
        default=100,
        help=("Log training metrics every X steps to `--report_to`."),
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="wandb",
        help=('The integration to report the results and logs to. Currently the only supported platform is `"wandb"`.'),
    )
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose"
            " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
            " and an Nvidia Ampere GPU."
        ),
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default=None,
        help=(
            "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
            " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
            " or to a folder containing files that 🤗 Datasets can understand."
        ),
    )
    parser.add_argument("--streaming", action="store_true", help="To stream a large dataset from Hub.")
    parser.add_argument(
        "--dataset_config_name",
        type=str,
        default=None,
        help="The config of the Dataset, leave as None if there's only one config.",
    )
    parser.add_argument(
        "--train_data_dir",
        type=str,
        default=None,
        help=(
            "A folder containing the training dataset. By default it will use the `load_dataset` method to load a custom dataset from the folder."
            " The folder must contain a dataset script as described here: https://huggingface.co/docs/datasets/dataset_script ."
            " If the `--load_from_disk` flag is passed, it will use the `load_from_disk` method instead. Ignored if `dataset_name` is specified."
        ),
    )
    parser.add_argument(
        "--load_from_disk",
        action="store_true",
        help=(
            "If True, will load a dataset that was previously saved using `save_to_disk` from `--train_data_dir`."
            " See more: https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.load_from_disk"
        ),
    )
    parser.add_argument(
        "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
    )
    parser.add_argument(
        "--conditioning_image_column",
        type=str,
        default="conditioning_image",
        help="The column of the dataset containing the controlnet conditioning image.",
    )
    parser.add_argument(
        "--caption_column",
        type=str,
        default="text",
        help="The column of the dataset containing a caption or a list of captions.",
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set. Needed if `streaming` is set to True."
        ),
    )
    parser.add_argument(
        "--proportion_empty_prompts",
        type=float,
        default=0,
        help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
    )
    parser.add_argument(
        "--validation_prompt",
        type=str,
        default=None,
        nargs="+",
        help=(
            "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
            " Provide either a matching number of `--validation_image`s, a single `--validation_image`"
            " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
        ),
    )
    parser.add_argument(
        "--validation_image",
        type=str,
        default=None,
        nargs="+",
        help=(
            "A set of paths to the controlnet conditioning images to be evaluated every `--validation_steps`"
            " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s,"
            " a single `--validation_prompt` to be used with all `--validation_image`s, or a single"
            " `--validation_image` that will be used with all `--validation_prompt`s."
        ),
    )
    parser.add_argument(
        "--validation_steps",
        type=int,
        default=100,
        help=(
            "Run validation every X steps. Validation consists of running the prompt"
            " `args.validation_prompt` and logging the images."
        ),
    )
    parser.add_argument("--wandb_entity", type=str, default=None, help=("The wandb entity to use (for teams)."))
    parser.add_argument(
        "--tracker_project_name",
        type=str,
        default="train_controlnet_flax",
        help=("The `project` argument passed to wandb"),
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="Number of steps to accumulate gradients over"
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")

    args = parser.parse_args()
    args.output_dir = args.output_dir.replace("{timestamp}", time.strftime("%Y%m%d_%H%M%S"))

    env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
    if env_local_rank != -1 and env_local_rank != args.local_rank:
        args.local_rank = env_local_rank

    # Sanity checks
    if args.dataset_name is None and args.train_data_dir is None:
        raise ValueError("Need either a dataset name or a training folder.")
    if args.dataset_name is not None and args.train_data_dir is not None:
        raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`")

    if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
        raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")

    if args.validation_prompt is not None and args.validation_image is None:
        raise ValueError("`--validation_image` must be set if `--validation_prompt` is set")

    if args.validation_prompt is None and args.validation_image is not None:
        raise ValueError("`--validation_prompt` must be set if `--validation_image` is set")

    if (
        args.validation_image is not None
        and args.validation_prompt is not None
        and len(args.validation_image) != 1
        and len(args.validation_prompt) != 1
        and len(args.validation_image) != len(args.validation_prompt)
    ):
        raise ValueError(
            "Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
            " or the same number of `--validation_prompt`s and `--validation_image`s"
        )

    # This idea comes from
    # https://github.com/borisdayma/dalle-mini/blob/d2be512d4a6a9cda2d63ba04afc33038f98f705f/src/dalle_mini/data.py#L370
    if args.streaming and args.max_train_samples is None:
        raise ValueError("You must specify `max_train_samples` when using dataset streaming.")

    return args


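# Example invocation — a sketch, not part of the original commit; the model id, dataset
# path, and validation files below are placeholders to substitute with your own:
#
#   python train_custom_model.py \
#     --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
#     --train_data_dir="/path/to/dataset" \
#     --resolution=512 \
#     --train_batch_size=1 \
#     --learning_rate=1e-4 \
#     --validation_image "/path/to/conditioning.png" \
#     --validation_prompt "a prompt to validate on" \
#     --report_to="wandb"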
def make_train_dataset(args, tokenizer, batch_size=None):
    # Get the datasets: you can either provide your own training and evaluation files (see below)
    # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        # dataset = load_dataset(
        #     args.dataset_name,
        #     args.dataset_config_name,
        #     cache_dir=args.cache_dir,
        #     streaming=args.streaming,
        # )

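        # NOTE: the stock script would call load_dataset with args.dataset_name (commented
        # out above); this custom version instead hard-codes a local dataset path.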
        dataset = load_dataset("/home/birgermoell/data")

    else:
        if args.train_data_dir is not None:
            if args.load_from_disk:
                dataset = load_from_disk(
                    args.train_data_dir,
                )
            else:
                dataset = load_dataset(
                    args.train_data_dir,
                    cache_dir=args.cache_dir,
                )
        # See more about loading custom images at
        # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script

    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    if isinstance(dataset["train"], IterableDataset):
        column_names = next(iter(dataset["train"])).keys()
    else:
        column_names = dataset["train"].column_names

    # 6. Get the column names for input/target.
    if args.image_column is None:
        image_column = column_names[0]
        logger.info(f"image column defaulting to {image_column}")
    else:
        image_column = args.image_column
        if image_column not in column_names:
            raise ValueError(
                f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    if args.caption_column is None:
        caption_column = column_names[1]
        logger.info(f"caption column defaulting to {caption_column}")
    else:
        caption_column = args.caption_column
        if caption_column not in column_names:
            raise ValueError(
                f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    if args.conditioning_image_column is None:
        conditioning_image_column = column_names[2]
        logger.info(f"conditioning image column defaulting to {conditioning_image_column}")
    else:
        conditioning_image_column = args.conditioning_image_column
        if conditioning_image_column not in column_names:
            raise ValueError(
                f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
            )

    def tokenize_captions(examples, is_train=True):
        captions = []
        for caption in examples[caption_column]:
            if random.random() < args.proportion_empty_prompts:
                captions.append("")
            elif isinstance(caption, str):
                captions.append(caption)
            elif isinstance(caption, (list, np.ndarray)):
                # take a random caption if there are multiple
                captions.append(random.choice(caption) if is_train else caption[0])
            else:
                raise ValueError(
                    f"Caption column `{caption_column}` should contain either strings or lists of strings."
                )
        inputs = tokenizer(
            captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
        )
        return inputs.input_ids

    image_transforms = transforms.Compose(
        [
            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.CenterCrop(args.resolution),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )

    conditioning_image_transforms = transforms.Compose(
        [
            transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
            transforms.CenterCrop(args.resolution),
            transforms.ToTensor(),
        ]
    )

    def preprocess_train(examples):
        images = [image.convert("RGB") for image in examples[image_column]]
        images = [image_transforms(image) for image in images]

        conditioning_images = [image.convert("RGB") for image in examples[conditioning_image_column]]
        conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]

        examples["pixel_values"] = images
        examples["conditioning_pixel_values"] = conditioning_images
        examples["input_ids"] = tokenize_captions(examples)

        return examples

    if jax.process_index() == 0:
        if args.max_train_samples is not None:
            if args.streaming:
                dataset["train"] = dataset["train"].shuffle(seed=args.seed).take(args.max_train_samples)
            else:
                dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
        # Set the training transforms
        if args.streaming:
            train_dataset = dataset["train"].map(
                preprocess_train,
                batched=True,
                batch_size=batch_size,
                remove_columns=list(dataset["train"].features.keys()),
            )
        else:
            train_dataset = dataset["train"].with_transform(preprocess_train)

    return train_dataset


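# collate_fn stacks the per-example tensors into batch tensors and converts them to numpy
# so they can later be split across accelerator devices. For example, with
# total_train_batch_size=8 the "pixel_values" entry has shape (8, 3, resolution, resolution);
# `shard` in the training loop then reshapes it to
# (num_devices, 8 // num_devices, 3, resolution, resolution).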
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

    conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
    conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()

    input_ids = torch.stack([example["input_ids"] for example in examples])

    batch = {
        "pixel_values": pixel_values,
        "conditioning_pixel_values": conditioning_pixel_values,
        "input_ids": input_ids,
    }
    batch = {k: v.numpy() for k, v in batch.items()}
    return batch


def get_params_to_save(params):
    return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))


def main():
    args = parse_args()

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # Setup logging, we only want one process per machine to log things on the screen.
    logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
    if jax.process_index() == 0:
        transformers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()

    # wandb init
    if jax.process_index() == 0 and args.report_to == "wandb":
        wandb.init(
            entity=args.wandb_entity,
            project=args.tracker_project_name,
            job_type="train",
            config=args,
        )

    if args.seed is not None:
        set_seed(args.seed)

    rng = jax.random.PRNGKey(0)

    # Handle the repository creation
    if jax.process_index() == 0:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)

        if args.push_to_hub:
            repo_id = create_repo(
                repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
            ).repo_id

    # Load the tokenizer and add the placeholder token as an additional special token
    if args.tokenizer_name:
        tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
    elif args.pretrained_model_name_or_path:
        tokenizer = CLIPTokenizer.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
        )
    else:
        raise NotImplementedError("No tokenizer specified!")

    # Get the datasets: you can either provide your own training and evaluation files (see below)
    total_train_batch_size = args.train_batch_size * jax.local_device_count() * args.gradient_accumulation_steps
    train_dataset = make_train_dataset(args, tokenizer, batch_size=total_train_batch_size)

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        shuffle=not args.streaming,
        collate_fn=collate_fn,
        batch_size=total_train_batch_size,
        num_workers=args.dataloader_num_workers,
        drop_last=True,
    )

    weight_dtype = jnp.float32
    if args.mixed_precision == "fp16":
        weight_dtype = jnp.float16
    elif args.mixed_precision == "bf16":
        weight_dtype = jnp.bfloat16

    # Load models and create wrapper for stable diffusion
    text_encoder = FlaxCLIPTextModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="text_encoder",
        dtype=weight_dtype,
        revision=args.revision,
        from_pt=args.from_pt,
    )
    vae, vae_params = FlaxAutoencoderKL.from_pretrained(
        args.pretrained_model_name_or_path,
        revision=args.revision,
        subfolder="vae",
        dtype=weight_dtype,
        from_pt=args.from_pt,
    )
    unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path,
        subfolder="unet",
        dtype=weight_dtype,
        revision=args.revision,
        from_pt=args.from_pt,
    )

    if args.controlnet_model_name_or_path:
        logger.info("Loading existing controlnet weights")
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            args.controlnet_model_name_or_path,
            revision=args.controlnet_revision,
            from_pt=args.controlnet_from_pt,
            dtype=jnp.float32,
        )
    else:
        logger.info("Initializing controlnet weights from unet")
        rng, rng_params = jax.random.split(rng)

        controlnet = FlaxControlNetModel(
            in_channels=unet.config.in_channels,
            down_block_types=unet.config.down_block_types,
            only_cross_attention=unet.config.only_cross_attention,
            block_out_channels=unet.config.block_out_channels,
            layers_per_block=unet.config.layers_per_block,
            attention_head_dim=unet.config.attention_head_dim,
            cross_attention_dim=unet.config.cross_attention_dim,
            use_linear_projection=unet.config.use_linear_projection,
            flip_sin_to_cos=unet.config.flip_sin_to_cos,
            freq_shift=unet.config.freq_shift,
        )
        controlnet_params = controlnet.init_weights(rng=rng_params)
        controlnet_params = unfreeze(controlnet_params)
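        # The ControlNet mirrors the UNet encoder, so its input conv, time embedding,
        # down blocks and mid block are initialized from the pretrained UNet rather
        # than from random weights (the initialization strategy of the ControlNet paper).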
        for key in [
            "conv_in",
            "time_embedding",
            "down_blocks_0",
            "down_blocks_1",
            "down_blocks_2",
            "down_blocks_3",
            "mid_block",
        ]:
            controlnet_params[key] = unet_params[key]

    # Optimization
    if args.scale_lr:
        args.learning_rate = args.learning_rate * total_train_batch_size

    constant_scheduler = optax.constant_schedule(args.learning_rate)

    adamw = optax.adamw(
        learning_rate=constant_scheduler,
        b1=args.adam_beta1,
        b2=args.adam_beta2,
        eps=args.adam_epsilon,
        weight_decay=args.adam_weight_decay,
    )

    optimizer = optax.chain(
        optax.clip_by_global_norm(args.max_grad_norm),
        adamw,
    )

    state = train_state.TrainState.create(apply_fn=controlnet.__call__, params=controlnet_params, tx=optimizer)

    noise_scheduler, noise_scheduler_state = FlaxDDPMScheduler.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="scheduler"
    )

    # Initialize our training
    validation_rng, train_rngs = jax.random.split(rng)
    train_rngs = jax.random.split(train_rngs, jax.local_device_count())

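    # For reference: with the DDPM forward process x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps
    # (where a_t = alphas_cumprod[t]), the signal-to-noise ratio below is SNR(t) = a_t / (1 - a_t).
    # The Min-SNR weighting (https://arxiv.org/abs/2303.09556) applied in compute_loss scales
    # the loss by min(SNR(t), snr_gamma) / SNR(t).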
    def compute_snr(timesteps):
        """
        Computes SNR as per https://github.com/TiankaiHang/Min-SNR-Diffusion-Training/blob/521b624bd70c67cee4bdf49225915f5945a872e3/guided_diffusion/gaussian_diffusion.py#L847-L849
        """
        alphas_cumprod = noise_scheduler_state.common.alphas_cumprod
        sqrt_alphas_cumprod = alphas_cumprod**0.5
        sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod) ** 0.5

        alpha = sqrt_alphas_cumprod[timesteps]
        sigma = sqrt_one_minus_alphas_cumprod[timesteps]
        # Compute SNR.
        snr = (alpha / sigma) ** 2
        return snr

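    # train_step runs on each device under pmap (axis name "batch"). When gradient
    # accumulation is enabled, the per-device batch is reshaped to
    # (accumulation_steps, per_device_batch, ...) and a jax.lax.fori_loop accumulates
    # loss and gradients over the leading axis before the parameter update.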
    def train_step(state, unet_params, text_encoder_params, vae_params, batch, train_rng):
        # reshape batch, add grad_step_dim if gradient_accumulation_steps > 1
        if args.gradient_accumulation_steps > 1:
            grad_steps = args.gradient_accumulation_steps
            batch = jax.tree_map(lambda x: x.reshape((grad_steps, x.shape[0] // grad_steps) + x.shape[1:]), batch)

        def compute_loss(params, minibatch, sample_rng):
            # Convert images to latent space
            vae_outputs = vae.apply(
                {"params": vae_params}, minibatch["pixel_values"], deterministic=True, method=vae.encode
            )
            latents = vae_outputs.latent_dist.sample(sample_rng)
            # (NHWC) -> (NCHW)
            latents = jnp.transpose(latents, (0, 3, 1, 2))
            latents = latents * vae.config.scaling_factor

            # Sample noise that we'll add to the latents
            noise_rng, timestep_rng = jax.random.split(sample_rng)
            noise = jax.random.normal(noise_rng, latents.shape)
            # Sample a random timestep for each image
            bsz = latents.shape[0]
            timesteps = jax.random.randint(
                timestep_rng,
                (bsz,),
                0,
                noise_scheduler.config.num_train_timesteps,
            )

            # Add noise to the latents according to the noise magnitude at each timestep
            # (this is the forward diffusion process)
            noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)

            # Get the text embedding for conditioning
            encoder_hidden_states = text_encoder(
                minibatch["input_ids"],
                params=text_encoder_params,
                train=False,
            )[0]

            controlnet_cond = minibatch["conditioning_pixel_values"]

            # Predict the noise residual and compute loss
            down_block_res_samples, mid_block_res_sample = controlnet.apply(
                {"params": params},
                noisy_latents,
                timesteps,
                encoder_hidden_states,
                controlnet_cond,
                train=True,
                return_dict=False,
            )

            model_pred = unet.apply(
                {"params": unet_params},
                noisy_latents,
                timesteps,
                encoder_hidden_states,
                down_block_additional_residuals=down_block_res_samples,
                mid_block_additional_residual=mid_block_res_sample,
            ).sample

            # Get the target for loss depending on the prediction type
            if noise_scheduler.config.prediction_type == "epsilon":
                target = noise
            elif noise_scheduler.config.prediction_type == "v_prediction":
                target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
            else:
                raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")

            loss = (target - model_pred) ** 2

            if args.snr_gamma is not None:
                snr = jnp.array(compute_snr(timesteps))
                snr_loss_weights = jnp.where(snr < args.snr_gamma, snr, jnp.ones_like(snr) * args.snr_gamma) / snr
                loss = loss * snr_loss_weights

            loss = loss.mean()

            return loss

        grad_fn = jax.value_and_grad(compute_loss)

        # get a minibatch (one gradient accumulation slice)
        def get_minibatch(batch, grad_idx):
            return jax.tree_util.tree_map(
                lambda x: jax.lax.dynamic_index_in_dim(x, grad_idx, keepdims=False),
                batch,
            )

        def loss_and_grad(grad_idx, train_rng):
            # create minibatch for the grad step
            minibatch = get_minibatch(batch, grad_idx) if grad_idx is not None else batch
            sample_rng, train_rng = jax.random.split(train_rng, 2)
            loss, grad = grad_fn(state.params, minibatch, sample_rng)
            return loss, grad, train_rng

        if args.gradient_accumulation_steps == 1:
            loss, grad, new_train_rng = loss_and_grad(None, train_rng)
        else:
            init_loss_grad_rng = (
                0.0,  # initial value for cumul_loss
                jax.tree_map(jnp.zeros_like, state.params),  # initial value for cumul_grad
                train_rng,  # initial value for train_rng
            )

            def cumul_grad_step(grad_idx, loss_grad_rng):
                cumul_loss, cumul_grad, train_rng = loss_grad_rng
                loss, grad, new_train_rng = loss_and_grad(grad_idx, train_rng)
                cumul_loss, cumul_grad = jax.tree_map(jnp.add, (cumul_loss, cumul_grad), (loss, grad))
                return cumul_loss, cumul_grad, new_train_rng

            loss, grad, new_train_rng = jax.lax.fori_loop(
                0,
                args.gradient_accumulation_steps,
                cumul_grad_step,
                init_loss_grad_rng,
            )
            loss, grad = jax.tree_map(lambda x: x / args.gradient_accumulation_steps, (loss, grad))

        grad = jax.lax.pmean(grad, "batch")

        new_state = state.apply_gradients(grads=grad)

        metrics = {"loss": loss}
        metrics = jax.lax.pmean(metrics, axis_name="batch")

        def l2(xs):
            return jnp.sqrt(sum([jnp.vdot(x, x) for x in jax.tree_util.tree_leaves(xs)]))

        metrics["l2_grads"] = l2(jax.tree_util.tree_leaves(grad))

        return new_state, metrics, new_train_rng

    # Create parallel version of the train step
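    # pmap compiles train_step once and runs it SPMD across devices over the "batch" axis;
    # donate_argnums=(0,) lets XLA reuse the old train state's device buffers for the
    # updated state, reducing peak device memory.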
    p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))

    # Replicate the train state on each device
    state = jax_utils.replicate(state)
    unet_params = jax_utils.replicate(unet_params)
    text_encoder_params = jax_utils.replicate(text_encoder.params)
    vae_params = jax_utils.replicate(vae_params)

    # Train!
    if args.streaming:
        dataset_length = args.max_train_samples
    else:
        dataset_length = len(train_dataloader)
    num_update_steps_per_epoch = math.ceil(dataset_length / args.gradient_accumulation_steps)

    # Scheduler and math around the number of training steps.
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch

    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {args.max_train_samples if args.streaming else len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
    logger.info(f"  Total optimization steps = {args.num_train_epochs * num_update_steps_per_epoch}")

    if jax.process_index() == 0 and args.report_to == "wandb":
        wandb.define_metric("*", step_metric="train/step")
        wandb.define_metric("train/step", step_metric="walltime")
        wandb.config.update(
            {
                "num_train_examples": args.max_train_samples if args.streaming else len(train_dataset),
                "total_train_batch_size": total_train_batch_size,
                "total_optimization_step": args.num_train_epochs * num_update_steps_per_epoch,
                "num_devices": jax.device_count(),
                "controlnet_params": sum(np.prod(x.shape) for x in jax.tree_util.tree_leaves(state.params)),
            }
        )

    global_step = step0 = 0
    epochs = tqdm(
        range(args.num_train_epochs),
        desc="Epoch ... ",
        position=0,
        disable=jax.process_index() > 0,
    )
    if args.profile_memory:
        jax.profiler.save_device_memory_profile(os.path.join(args.output_dir, "memory_initial.prof"))
    t00 = t0 = time.monotonic()
    for epoch in epochs:
        # ======================== Training ================================

        train_metrics = []
        train_metric = None

        steps_per_epoch = (
            args.max_train_samples // total_train_batch_size
            if args.streaming or args.max_train_samples
            else len(train_dataset) // total_train_batch_size
        )
        train_step_progress_bar = tqdm(
            total=steps_per_epoch,
            desc="Training...",
            position=1,
            leave=False,
            disable=jax.process_index() > 0,
        )
        # train
        for batch in train_dataloader:
            if args.profile_steps and global_step == 1:
                train_metric["loss"].block_until_ready()
                jax.profiler.start_trace(args.output_dir)
            if args.profile_steps and global_step == 1 + args.profile_steps:
                train_metric["loss"].block_until_ready()
                jax.profiler.stop_trace()

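            # Split the global batch along its leading axis into per-device sub-batches,
            # i.e. (total_batch, ...) -> (num_local_devices, per_device_batch, ...), so
            # p_train_step can map one slice to each device.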
            batch = shard(batch)
            with jax.profiler.StepTraceAnnotation("train", step_num=global_step):
                state, train_metric, train_rngs = p_train_step(
                    state, unet_params, text_encoder_params, vae_params, batch, train_rngs
                )
            train_metrics.append(train_metric)

            train_step_progress_bar.update(1)

            global_step += 1
            if global_step >= args.max_train_steps:
                break

            if (
                args.validation_prompt is not None
                and global_step % args.validation_steps == 0
                and jax.process_index() == 0
            ):
                _ = log_validation(controlnet, state.params, tokenizer, args, validation_rng, weight_dtype)

            if global_step % args.logging_steps == 0 and jax.process_index() == 0:
                if args.report_to == "wandb":
                    train_metrics = jax_utils.unreplicate(train_metrics)
                    train_metrics = jax.tree_util.tree_map(lambda *m: jnp.array(m).mean(), *train_metrics)
                    wandb.log(
                        {
                            "walltime": time.monotonic() - t00,
                            "train/step": global_step,
                            "train/epoch": global_step / dataset_length,
                            "train/steps_per_sec": (global_step - step0) / (time.monotonic() - t0),
                            **{f"train/{k}": v for k, v in train_metrics.items()},
                        }
                    )
                    t0, step0 = time.monotonic(), global_step
                train_metrics = []
            if global_step % args.checkpointing_steps == 0 and jax.process_index() == 0:
                controlnet.save_pretrained(
                    f"{args.output_dir}/{global_step}",
                    params=get_params_to_save(state.params),
                )

        train_metric = jax_utils.unreplicate(train_metric)
        train_step_progress_bar.close()
        epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")

    # Final validation & store model.
    if jax.process_index() == 0:
        if args.validation_prompt is not None:
            if args.profile_validation:
                jax.profiler.start_trace(args.output_dir)
            image_logs = log_validation(controlnet, state.params, tokenizer, args, validation_rng, weight_dtype)
            if args.profile_validation:
                jax.profiler.stop_trace()
        else:
            image_logs = None

        controlnet.save_pretrained(
            args.output_dir,
            params=get_params_to_save(state.params),
        )

        if args.push_to_hub:
            save_model_card(
                repo_id,
                image_logs=image_logs,
                base_model=args.pretrained_model_name_or_path,
                repo_folder=args.output_dir,
            )
            upload_folder(
                repo_id=repo_id,
                folder_path=args.output_dir,
                commit_message="End of training",
                ignore_patterns=["step_*", "epoch_*"],
            )

    if args.profile_memory:
        jax.profiler.save_device_memory_profile(os.path.join(args.output_dir, "memory_final.prof"))
    logger.info("Finished training.")


if __name__ == "__main__":
    main()