File size: 1,207 Bytes
640a27b
 
 
843b14b
640a27b
 
 
 
843b14b
640a27b
 
 
843b14b
640a27b
 
 
 
843b14b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import sys
# Make the vendored stable_diffusion checkout importable; must run before the
# `from train_esd import ...` line below, which lives inside that directory.
sys.path.insert(0,'stable_diffusion')
from train_esd import train_esd
import torch
# Paths into the vendored Stable Diffusion tree: full-EMA v1.4 weights,
# the LDM inference config, and a diffusers-format model config.
ckpt_path = "stable_diffusion/models/ldm/sd-v1-4-full-ema.ckpt"
config_path = "stable_diffusion/configs/stable-diffusion/v1-inference.yaml"
diffusers_config_path = "stable_diffusion/config.json"

# Fine-tune the model to erase the concept "England", returning the original
# model and the edited model (orig, newm).
# NOTE(review): all arguments are positional into train_esd, whose signature is
# not visible in this file. 'xattn' presumably selects which weights to train
# (cross-attention only), and the numerics (3, 1, 2, .003) presumably map to
# guidance scale(s) / iteration count / learning rate — confirm against
# train_esd's definition before changing any of them.
orig, newm = train_esd("England",
              'xattn',
              3,
              1,
              2,
              .003,
              config_path,
              ckpt_path, 
              diffusers_config_path,
              ['cuda', 'cuda'],   # assumes CUDA is available for both model copies — TODO confirm
              None
              )


from convertModels import convert_ldm_unet_checkpoint, create_unet_diffusers_config
from diffusers import UNet2DConditionModel, AutoencoderKL, LMSDiscreteScheduler
from omegaconf import OmegaConf
from transformers import CLIPTextModel, CLIPTokenizer

# Convert the edited LDM UNet (`newm`) into a diffusers UNet2DConditionModel.
original_config = OmegaConf.load(config_path)
# Pin the UNet's input channels to 4 before deriving the diffusers config
# (presumably the SD v1 latent-channel count — confirm against the checkpoint).
# Subscripting an OmegaConf DictConfig yields a live view, so this mutation
# is reflected in `original_config` itself.
unet_node = original_config["model"]["params"]["unet_config"]["params"]
unet_node["in_channels"] = 4
unet_config = create_unet_diffusers_config(original_config, image_size=512)
# Remap the edited model's weights into diffusers naming and load them
# (load_state_dict is strict by default, so any key mismatch raises).
converted_unet_checkpoint = convert_ldm_unet_checkpoint(newm.state_dict(), unet_config)
unet = UNet2DConditionModel(**unet_config)
unet.load_state_dict(converted_unet_checkpoint)