""" Work in progress

Similar to generate-embedding.py, but outputs in the format
that SDXL models expect.

Also tries to load the SDXL base text encoder specifically.
Requires that you populate the model and config paths defined
immediately below this comment section.

You can get them from:
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/text_encoder_2

(rename the downloaded safetensors weights file to text_encoder_2.safetensors,
so that it matches model_path2 below)

Plan:
Take input for a single word or phrase.
Save the calculated embeddings to "generated_XL.safetensors"
(or "{word}_XL.safetensors" when the input is a single alphabetic word).

Note that you can generate an embedding from two words, or even more.
"""
model_path1 = "text_encoder.safetensors"
model_config1 = "text_encoder_config.json"
model_path2 = "text_encoder_2.safetensors"
model_config2 = "text_encoder_2_config.json"

import torch
from transformers import CLIPProcessor, CLIPTextModel, CLIPTextModelWithProjection
from safetensors.torch import save_file
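
# Lazily initialized globals: the two text encoders and the shared tokenizer/processor.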
tmodel1 = None
tmodel2 = None
processor = None

device = torch.device("cuda")
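

# Load the first (CLIP-L) text encoder from its local safetensors weights.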
def initCLIPmodel(model_path, model_config):
    global tmodel1
    print("loading", model_path)
    tmodel1 = CLIPTextModel.from_pretrained(model_path, config=model_config,
                                            local_files_only=True, use_safetensors=True)
    tmodel1.to(device)
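

# Load the second (CLIP-G) text encoder; CLIPTextModelWithProjection adds the
# projection layer that produces the "text_embeds" output used below.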
def initXLCLIPmodel(model_path, model_config):
    global tmodel2
    print("loading", model_path)
    tmodel2 = CLIPTextModelWithProjection.from_pretrained(model_path, config=model_config,
                                                          local_files_only=True, use_safetensors=True)
    tmodel2.to(device)
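

# Fetch the tokenizer/processor; the same CLIP-L processor tokenizes input for both encoders.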
def initCLIPprocessor():
    global processor
    CLIPname = "openai/clip-vit-large-patch14"
    print("getting processor from", CLIPname)
    processor = CLIPProcessor.from_pretrained(CLIPname)
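

# Pooled CLIP-L embedding for the given text (saved below under "clip_l").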
def embed_from_text(text):
    global processor, tmodel1
    if processor is None:
        initCLIPprocessor()
        initCLIPmodel(model_path1, model_config1)
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)

    print("getting embeddings1")
    with torch.no_grad():
        outputs = tmodel1(**inputs)
    embeddings = outputs.pooler_output
    return embeddings
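

# Projected CLIP-G embedding for the given text (saved below under "clip_g").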
def embed_from_text2(text):
    global processor, tmodel2
    if processor is None:
        initCLIPprocessor()
    if tmodel2 is None:
        initXLCLIPmodel(model_path2, model_config2)
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)

    print("getting embeddings2")
    with torch.no_grad():
        outputs = tmodel2(**inputs)
    embeddings = outputs.text_embeds
    return embeddings


word = input("type a phrase to generate an embedding for: ")

emb1 = embed_from_text(word)
emb2 = embed_from_text2(word)

print("Shape of results =", emb1.shape, emb2.shape)
output = "generated_XL.safetensors"

if all(char.isalpha() for char in word):
    output = f"{word}_XL.safetensors"
print(f"Saving to {output}...")
save_file({"clip_g": emb2, "clip_l": emb1}, output)