from nanograd.models.stable_diffusion.clip import CLIP
from nanograd.models.stable_diffusion.encoder import VAE_Encoder
from nanograd.models.stable_diffusion.decoder import VAE_Decoder
from nanograd.models.stable_diffusion.diffusion import Diffusion
from nanograd.models.stable_diffusion import model_converter


def preload_models_from_standard_weights(ckpt_path, device):
    """Load a standard Stable Diffusion checkpoint and return its sub-models on `device`."""
    # Convert the original checkpoint into per-module state dicts (encoder, decoder, diffusion, clip).
    state_dict = model_converter.load_from_standard_weights(ckpt_path, device)

    encoder = VAE_Encoder().to(device)
    encoder.load_state_dict(state_dict['encoder'], strict=True)

    decoder = VAE_Decoder().to(device)
    decoder.load_state_dict(state_dict['decoder'], strict=True)

    diffusion = Diffusion().to(device)
    diffusion.load_state_dict(state_dict['diffusion'], strict=True)

    clip = CLIP().to(device)
    clip.load_state_dict(state_dict['clip'], strict=True)

    return {
        'clip': clip,
        'encoder': encoder,
        'decoder': decoder,
        'diffusion': diffusion,
    }
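

# Minimal usage sketch (not part of the loader itself): picks a device and loads all four
# sub-models, then prints their parameter counts as a sanity check. The checkpoint path below
# is a hypothetical example; point it at whatever standard SD v1.x .ckpt file you have locally.
if __name__ == "__main__":
    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Hypothetical checkpoint location; adjust to your own download path.
    models = preload_models_from_standard_weights("data/v1-5-pruned-emaonly.ckpt", device)

    # Report the number of parameters per sub-model to confirm the weights loaded.
    for name, module in models.items():
        n_params = sum(p.numel() for p in module.parameters())
        print(f"{name}: {n_params:,} parameters")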