ppbrown committed on
Commit
84892d5
1 Parent(s): 2d217a3

Upload generate-embeddingXL.py

Files changed (1)
  1. generate-embeddingXL.py +99 -0
generate-embeddingXL.py ADDED
@@ -0,0 +1,99 @@
+ #!/bin/env python
+
+ """ Work in progress
+
+ Similar to generate-embedding.py, but outputs in the format
+ that SDXL models expect. I hope.
+
+ Also tries to load the SDXL base text encoder specifically.
+ Requires you to populate the two paths mentioned immediately below this comment section.
+
+ You can get them from:
+ https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/tree/main/text_encoder_2
+
+ (rename diffusion_pytorch_model.safetensors to text_encoder_xl.safetensors)
+
+
+ Plan:
+ Take input for a single word or phrase.
+ Save the resulting embedding to "generatedXL.safetensors"
+
+ Note that you can generate an embedding from two words, or even more.
+
+ I could also include a "clip_l" key, but..
+ Meh.
+ """
+
+ model_path = "text_encoder_xl.safetensors"
+ model_config = "text_encoder_2_config.json"
+
+ import torch
+ from transformers import CLIPProcessor, CLIPTextModelWithProjection
+ from safetensors.torch import save_file
+
+ # 1. Load the pretrained model
+ # Note that it doesn't like a leading "/" in the name!!
+
+
+ model = None
+ processor = None
+
+ # use CUDA when available, otherwise fall back to CPU
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Note the two default, required pathnames at the top of this file
+ def initXLCLIPmodel():
+     global model
+     print("loading", model_path)
+     model = CLIPTextModelWithProjection.from_pretrained(
+         model_path, config=model_config, local_files_only=True, use_safetensors=True
+     )
+     model.to(device)
+
+ # A bit weird, but SDXL seems to still use this tokenizer
+ def initCLIPprocessor():
+     global processor
+     CLIPname = "openai/clip-vit-large-patch14"
+     print("getting processor from", CLIPname)
+     processor = CLIPProcessor.from_pretrained(CLIPname)
+
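+ # Untested alternative, not what this script does: the SDXL base repo also
+ # ships its second tokenizer in a "tokenizer_2" subfolder, so it could
+ # presumably be loaded from there instead:
+ #
+ #   from transformers import CLIPTokenizer
+ #   tokenizer_2 = CLIPTokenizer.from_pretrained(
+ #       "stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer_2"
+ #   )
+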
+ def embed_from_text(text):
+     global processor, model
+     if processor is None:
+         initCLIPprocessor()
+         initXLCLIPmodel()
+     print("getting tokens")
+     inputs = processor(text=text, return_tensors="pt")
+     inputs = inputs.to(device)
+
+     print("getting embeddings?")
+     # run inference without building an autograd graph
+     with torch.no_grad():
+         outputs = model(**inputs)
+     print("finalizing")
+     # text_embeds is the pooled, projected embedding ("clip_g")
+     embeddings = outputs.text_embeds
+     return embeddings
+
+
+
+ ##########################################
+
+ word = input("type a phrase to generate an embedding for: ")
+
+ emb = embed_from_text(word)
+ #embs = emb.unsqueeze(0) # stupid matrix magic to make it the required shape
+ embs = emb
+
+ print("Shape of result =", embs.shape)
+ # Note that consuming programs expect 2D shapes such as
+ # torch.Size([1, 1280])
+
+ output = "generatedXL.safetensors"
+ # if a single word was used, name the output file after it
+ if all(char.isalpha() for char in word):
+     output = f"{word}XL.safetensors"
+ print(f"Saving to {output}...")
+ # move to CPU (and drop any autograd tracking) before serializing
+ save_file({"clip_g": embs.detach().cpu()}, output)
+
+ # Technically we are saving shape ([1][1280]),
+ # whereas official XL embedding files are
+ #   (clip_g) shape ([8][1280])
+ #   (clip_l) shape ([8][768])
+
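+ # Optional inspection helper (a sketch; defined here but never called):
+ # list the keys and tensor shapes inside any .safetensors embedding file,
+ # e.g. to compare this script's output against an official XL embedding.
+ def inspect_embedding(path):
+     from safetensors import safe_open
+     with safe_open(path, framework="pt", device="cpu") as f:
+         for key in f.keys():
+             print(key, f.get_tensor(key).shape)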