"""Erase the concept "England" from Stable Diffusion v1.4 via ESD, then
convert the fine-tuned LDM UNet weights into the diffusers layout.

Flat experiment script: training starts as soon as the module runs.
"""
import sys

# The bundled stable_diffusion checkout must be importable before any of the
# project imports below — keep this insert ahead of them.
sys.path.insert(0, 'stable_diffusion')

import torch  # kept from the original script; training runs on CUDA devices
from diffusers import UNet2DConditionModel, AutoencoderKL, LMSDiscreteScheduler
from omegaconf import OmegaConf
from transformers import CLIPTextModel, CLIPTokenizer

from convertModels import convert_ldm_unet_checkpoint, create_unet_diffusers_config
from train_esd import train_esd

# Locations of the SD v1.4 checkpoint and its LDM / diffusers configs.
ckpt_path = "stable_diffusion/models/ldm/sd-v1-4-full-ema.ckpt"
config_path = "stable_diffusion/configs/stable-diffusion/v1-inference.yaml"
diffusers_config_path = "stable_diffusion/config.json"

# Fine-tune so the target concept is suppressed; returns (original, erased)
# models. NOTE(review): the positional argument meanings are inferred from
# context — confirm against train_esd's signature. 'xattn' presumably limits
# training to cross-attention weights; .003 looks like a learning rate.
orig, newm = train_esd(
    "England",
    'xattn',
    3,
    1,
    2,
    .003,
    config_path,
    ckpt_path,
    diffusers_config_path,
    ['cuda', 'cuda'],
    None,
)

# Convert the erased model's LDM UNet state dict to the diffusers format and
# load it into a freshly constructed UNet2DConditionModel.
original_config = OmegaConf.load(config_path)
original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = 4
unet_config = create_unet_diffusers_config(original_config, image_size=512)
converted_unet_checkpoint = convert_ldm_unet_checkpoint(newm.state_dict(), unet_config)

unet = UNet2DConditionModel(**unet_config)
unet.load_state_dict(converted_unet_checkpoint)