#!/usr/bin/env python

""" Work in progress

Similar to generate-embedding.py, but outputs in the format
that SDXL models expect.

Also tries to load the SDXL base text encoder specifically.
Requires you populate the two paths mentioned immediately below this comment section.

You can get them from:
https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/text_encoder_2

(rename diffusion_pytorch_model.safetensors to text_encoder_xl.safetensors)


Plan:
   Take input for a single word or phrase.
   Save out calculations, to "generatedXL.safetensors"

   Note that you can generate an embedding from two words, or even more
   
"""

model_path1 = "text_encoder.safetensors"
model_config1 = "text_encoder_config.json"
model_path2 = "text_encoder_2.safetensors"
model_config2 = "text_encoder_2_config.json"

import torch
from transformers import CLIPProcessor, CLIPTextModel, CLIPTextModelWithProjection
from safetensors.torch import save_file

# Load the pretrained models.
# Note that from_pretrained() doesn't like a leading "/" in the path!!


tmodel1 = None
tmodel2 = None
processor = None

# Fall back to CPU if no CUDA device is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def initCLIPmodel(model_path, model_config):
    """Load the first (CLIP-L) text encoder from local safetensors files."""
    global tmodel1
    print("loading", model_path)
    tmodel1 = CLIPTextModel.from_pretrained(model_path, config=model_config, local_files_only=True, use_safetensors=True)
    tmodel1.to(device)

#
# Loads the second SDXL text encoder. Note the two required pathnames
# declared at the top of the file.
def initXLCLIPmodel(model_path, model_config):
    global tmodel2
    print("loading", model_path)
    tmodel2 = CLIPTextModelWithProjection.from_pretrained(model_path, config=model_config, local_files_only=True, use_safetensors=True)
    tmodel2.to(device)

# A bit weird, but SDXL still seems to use this tokenizer.
def initCLIPprocessor():
    global processor
    CLIPname = "openai/clip-vit-large-patch14"
    print("getting processor from", CLIPname)
    processor = CLIPProcessor.from_pretrained(CLIPname)

def embed_from_text(text):
    """Return the pooled embedding ("clip_l") from the first text encoder."""
    global processor, tmodel1
    if processor is None:
        initCLIPprocessor()
        initCLIPmodel(model_path1, model_config1)
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)

    print("getting embeddings1")
    with torch.no_grad():
        outputs = tmodel1(**inputs)
    embeddings = outputs.pooler_output
    return embeddings

def embed_from_text2(text):
    """Return the projected embedding ("clip_g") from the second text encoder."""
    global processor, tmodel2
    if processor is None:
        initCLIPprocessor()
    if tmodel2 is None:
        initXLCLIPmodel(model_path2, model_config2)
    inputs = processor(text=text, return_tensors="pt")
    inputs.to(device)

    print("getting embeddings2")
    with torch.no_grad():
        outputs = tmodel2(**inputs)
    embeddings = outputs.text_embeds
    return embeddings



##########################################

word = input("type a phrase to generate an embedding for: ")

emb1 = embed_from_text(word)
emb2 = embed_from_text2(word)

print("Shape of results = ",emb1.shape,emb2.shape)

output = "generated_XL.safetensors"
# if single word used, then rename output file
if all(char.isalpha() for char in word):
    output=f"{word}_XL.safetensors"
print(f"Saving to {output}...")
save_file({"clip_g": emb2,"clip_l":emb1}, output)
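
# A quick, optional sanity check (a minimal sketch, not part of the plan
# above): reload the file we just wrote and confirm that both tensors were
# stored under the expected key names, "clip_g" and "clip_l". load_file()
# returns a dict of plain CPU tensors, so this also verifies the file is
# readable outside this script.
from safetensors.torch import load_file

loaded = load_file(output)
for key, tensor in loaded.items():
    print(f"  {key}: shape={tuple(tensor.shape)}, dtype={tensor.dtype}")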