Upload 94 files
- diffsynth/models/__init__.py +81 -1
- diffsynth/models/downloader.py +278 -0
- diffsynth/prompts/hunyuan_dit_prompter.py +9 -3
- diffsynth/prompts/sd_prompter.py +5 -1
- diffsynth/prompts/sdxl_prompter.py +9 -3
- diffsynth/prompts/utils.py +2 -2
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/special_tokens_map.json +7 -0
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/tokenizer_config.json +16 -0
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab.txt +0 -0
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab_org.txt +0 -0
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/config.json +28 -0
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/special_tokens_map.json +1 -0
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/spiece.model +3 -0
- diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/tokenizer_config.json +1 -0
- diffsynth/tokenizer_configs/stable_diffusion/tokenizer/merges.txt +0 -0
- diffsynth/tokenizer_configs/stable_diffusion/tokenizer/special_tokens_map.json +24 -0
- diffsynth/tokenizer_configs/stable_diffusion/tokenizer/tokenizer_config.json +34 -0
- diffsynth/tokenizer_configs/stable_diffusion/tokenizer/vocab.json +0 -0
- diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/merges.txt +0 -0
- diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/special_tokens_map.json +24 -0
- diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/tokenizer_config.json +38 -0
- diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/vocab.json +0 -0
diffsynth/models/__init__.py
CHANGED
@@ -1,5 +1,9 @@
 import torch, os
 from safetensors import safe_open
+from typing_extensions import Literal, TypeAlias
+from typing import List
+
+from .downloader import download_from_huggingface, download_from_modelscope

 from .sd_text_encoder import SDTextEncoder
 from .sd_unet import SDUNet
@@ -29,13 +33,89 @@ from .hunyuan_dit_text_encoder import HunyuanDiTCLIPTextEncoder, HunyuanDiTT5Tex
 from .hunyuan_dit import HunyuanDiT


+preset_models_on_huggingface = {
+    "HunyuanDiT": [
+        ("Tencent-Hunyuan/HunyuanDiT", "t2i/clip_text_encoder/pytorch_model.bin", "models/HunyuanDiT/t2i/clip_text_encoder"),
+        ("Tencent-Hunyuan/HunyuanDiT", "t2i/mt5/pytorch_model.bin", "models/HunyuanDiT/t2i/mt5"),
+        ("Tencent-Hunyuan/HunyuanDiT", "t2i/model/pytorch_model_ema.pt", "models/HunyuanDiT/t2i/model"),
+        ("Tencent-Hunyuan/HunyuanDiT", "t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin", "models/HunyuanDiT/t2i/sdxl-vae-fp16-fix"),
+    ],
+    "stable-video-diffusion-img2vid-xt": [
+        ("stabilityai/stable-video-diffusion-img2vid-xt", "svd_xt.safetensors", "models/stable_video_diffusion"),
+    ],
+    "ExVideo-SVD-128f-v1": [
+        ("ECNU-CILab/ExVideo-SVD-128f-v1", "model.fp16.safetensors", "models/stable_video_diffusion"),
+    ],
+}
+preset_models_on_modelscope = {
+    "HunyuanDiT": [
+        ("modelscope/HunyuanDiT", "t2i/clip_text_encoder/pytorch_model.bin", "models/HunyuanDiT/t2i/clip_text_encoder"),
+        ("modelscope/HunyuanDiT", "t2i/mt5/pytorch_model.bin", "models/HunyuanDiT/t2i/mt5"),
+        ("modelscope/HunyuanDiT", "t2i/model/pytorch_model_ema.pt", "models/HunyuanDiT/t2i/model"),
+        ("modelscope/HunyuanDiT", "t2i/sdxl-vae-fp16-fix/diffusion_pytorch_model.bin", "models/HunyuanDiT/t2i/sdxl-vae-fp16-fix"),
+    ],
+    "stable-video-diffusion-img2vid-xt": [
+        ("AI-ModelScope/stable-video-diffusion-img2vid-xt", "svd_xt.safetensors", "models/stable_video_diffusion"),
+    ],
+    "ExVideo-SVD-128f-v1": [
+        ("ECNU-CILab/ExVideo-SVD-128f-v1", "model.fp16.safetensors", "models/stable_video_diffusion"),
+    ],
+}
+Preset_model_id: TypeAlias = Literal[
+    "HunyuanDiT",
+    "stable-video-diffusion-img2vid-xt",
+    "ExVideo-SVD-128f-v1"
+]
+Preset_model_website: TypeAlias = Literal[
+    "HuggingFace",
+    "ModelScope",
+]
+website_to_preset_models = {
+    "HuggingFace": preset_models_on_huggingface,
+    "ModelScope": preset_models_on_modelscope,
+}
+website_to_download_fn = {
+    "HuggingFace": download_from_huggingface,
+    "ModelScope": download_from_modelscope,
+}
+
+
 class ModelManager:
-    def __init__(
+    def __init__(
+        self,
+        torch_dtype=torch.float16,
+        device="cuda",
+        model_id_list: List[Preset_model_id] = [],
+        downloading_priority: List[Preset_model_website] = ["ModelScope", "HuggingFace"],
+        file_path_list: List[str] = [],
+    ):
         self.torch_dtype = torch_dtype
         self.device = device
         self.model = {}
         self.model_path = {}
         self.textual_inversion_dict = {}
+        downloaded_files = self.download_models(model_id_list, downloading_priority)
+        self.load_models(downloaded_files + file_path_list)
+
+    def download_models(
+        self,
+        model_id_list: List[Preset_model_id] = [],
+        downloading_priority: List[Preset_model_website] = ["ModelScope", "HuggingFace"],
+    ):
+        downloaded_files = []
+        for model_id in model_id_list:
+            for website in downloading_priority:
+                if model_id in website_to_preset_models[website]:
+                    for model_id, origin_file_path, local_dir in website_to_preset_models[website][model_id]:
+                        # Check if the file is downloaded.
+                        file_to_download = os.path.join(local_dir, os.path.basename(origin_file_path))
+                        if file_to_download in downloaded_files:
+                            continue
+                        # Download
+                        website_to_download_fn[website](model_id, origin_file_path, local_dir)
+                        if os.path.basename(origin_file_path) in os.listdir(local_dir):
+                            downloaded_files.append(file_to_download)
+        return downloaded_files

     def is_stable_video_diffusion(self, state_dict):
         param_name = "model.diffusion_model.output_blocks.9.1.time_stack.0.norm_in.weight"
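The new constructor threads preset downloading into loading: every entry in model_id_list is looked up in the preset table of each website in downloading_priority, fetched under models/, and the resulting paths are passed to load_models together with file_path_list. A minimal usage sketch of the new signature (hypothetical caller script, not part of this commit):

# Hypothetical usage of the preset-download path added above.
import torch
from diffsynth.models import ModelManager

model_manager = ModelManager(
    torch_dtype=torch.float16,
    device="cuda",
    # Preset IDs from Preset_model_id; ModelScope is tried before
    # HuggingFace, following downloading_priority.
    model_id_list=["stable-video-diffusion-img2vid-xt", "ExVideo-SVD-128f-v1"],
    downloading_priority=["ModelScope", "HuggingFace"],
    file_path_list=[],  # extra local checkpoints, if any
)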
diffsynth/models/downloader.py
ADDED
@@ -0,0 +1,278 @@
+from huggingface_hub import hf_hub_download
+from http.cookiejar import CookieJar
+from pathlib import Path
+from typing import Dict, Optional, List, Union
+import copy, uuid, requests, io, platform, pickle, os, urllib
+from requests.adapters import Retry
+from tqdm import tqdm
+
+
+def _get_sep(path):
+    if isinstance(path, bytes):
+        return b'/'
+    else:
+        return '/'
+
+
+def expanduser(path):
+    """Expand ~ and ~user constructions. If user or $HOME is unknown,
+    do nothing."""
+    path = os.fspath(path)
+    if isinstance(path, bytes):
+        tilde = b'~'
+    else:
+        tilde = '~'
+    if not path.startswith(tilde):
+        return path
+    sep = _get_sep(path)
+    i = path.find(sep, 1)
+    if i < 0:
+        i = len(path)
+    if i == 1:
+        if 'HOME' not in os.environ:
+            import pwd
+            try:
+                userhome = pwd.getpwuid(os.getuid()).pw_dir
+            except KeyError:
+                # bpo-10496: if the current user identifier doesn't exist in the
+                # password database, return the path unchanged
+                return path
+        else:
+            userhome = os.environ['HOME']
+    else:
+        import pwd
+        name = path[1:i]
+        if isinstance(name, bytes):
+            name = str(name, 'ASCII')
+        try:
+            pwent = pwd.getpwnam(name)
+        except KeyError:
+            # bpo-10496: if the user name from the path doesn't exist in the
+            # password database, return the path unchanged
+            return path
+        userhome = pwent.pw_dir
+    if isinstance(path, bytes):
+        userhome = os.fsencode(userhome)
+        root = b'/'
+    else:
+        root = '/'
+    userhome = userhome.rstrip(root)
+    return (userhome + path[i:]) or root
+
+
+
+class ModelScopeConfig:
+    DEFAULT_CREDENTIALS_PATH = Path.home().joinpath('.modelscope', 'credentials')
+    path_credential = expanduser(DEFAULT_CREDENTIALS_PATH)
+    COOKIES_FILE_NAME = 'cookies'
+    GIT_TOKEN_FILE_NAME = 'git_token'
+    USER_INFO_FILE_NAME = 'user'
+    USER_SESSION_ID_FILE_NAME = 'session'
+
+    @staticmethod
+    def make_sure_credential_path_exist():
+        os.makedirs(ModelScopeConfig.path_credential, exist_ok=True)
+
+    @staticmethod
+    def get_user_session_id():
+        session_path = os.path.join(ModelScopeConfig.path_credential,
+                                    ModelScopeConfig.USER_SESSION_ID_FILE_NAME)
+        session_id = ''
+        if os.path.exists(session_path):
+            with open(session_path, 'rb') as f:
+                session_id = str(f.readline().strip(), encoding='utf-8')
+                return session_id
+        if session_id == '' or len(session_id) != 32:
+            session_id = str(uuid.uuid4().hex)
+            ModelScopeConfig.make_sure_credential_path_exist()
+            with open(session_path, 'w+') as wf:
+                wf.write(session_id)
+
+        return session_id
+
+    @staticmethod
+    def get_user_agent(user_agent: Union[Dict, str, None] = None, ) -> str:
+        """Formats a user-agent string with basic info about a request.
+
+        Args:
+            user_agent (`str`, `dict`, *optional*):
+                The user agent info in the form of a dictionary or a single string.
+
+        Returns:
+            The formatted user-agent string.
+        """
+
+        # include some more telemetrics when executing in dedicated
+        # cloud containers
+        MODELSCOPE_CLOUD_ENVIRONMENT = 'MODELSCOPE_ENVIRONMENT'
+        MODELSCOPE_CLOUD_USERNAME = 'MODELSCOPE_USERNAME'
+        env = 'custom'
+        if MODELSCOPE_CLOUD_ENVIRONMENT in os.environ:
+            env = os.environ[MODELSCOPE_CLOUD_ENVIRONMENT]
+        user_name = 'unknown'
+        if MODELSCOPE_CLOUD_USERNAME in os.environ:
+            user_name = os.environ[MODELSCOPE_CLOUD_USERNAME]
+
+        ua = 'modelscope/%s; python/%s; session_id/%s; platform/%s; processor/%s; env/%s; user/%s' % (
+            "1.15.0",
+            platform.python_version(),
+            ModelScopeConfig.get_user_session_id(),
+            platform.platform(),
+            platform.processor(),
+            env,
+            user_name,
+        )
+        if isinstance(user_agent, dict):
+            ua += '; ' + '; '.join(f'{k}/{v}' for k, v in user_agent.items())
+        elif isinstance(user_agent, str):
+            ua += '; ' + user_agent
+        return ua
+
+    @staticmethod
+    def get_cookies():
+        cookies_path = os.path.join(ModelScopeConfig.path_credential,
+                                    ModelScopeConfig.COOKIES_FILE_NAME)
+        if os.path.exists(cookies_path):
+            with open(cookies_path, 'rb') as f:
+                cookies = pickle.load(f)
+                return cookies
+        return None
+
+
+
+def modelscope_http_get_model_file(
+    url: str,
+    local_dir: str,
+    file_name: str,
+    file_size: int,
+    cookies: CookieJar,
+    headers: Optional[Dict[str, str]] = None,
+):
+    """Download remote file, retrying 5 times before giving up on errors.
+
+    Args:
+        url(str):
+            actual download url of the file
+        local_dir(str):
+            local directory where the downloaded file is stored
+        file_name(str):
+            name of the file stored in `local_dir`
+        file_size(int):
+            The file size.
+        cookies(CookieJar):
+            cookies used to authenticate the user, needed for downloading private repos
+        headers(Dict[str, str], optional):
+            http headers to carry necessary info when requesting the remote file
+
+    Raises:
+        FileDownloadError: File download failed.
+
+    """
+    get_headers = {} if headers is None else copy.deepcopy(headers)
+    get_headers['X-Request-ID'] = str(uuid.uuid4().hex)
+    temp_file_path = os.path.join(local_dir, file_name)
+    # retry sleep 0.5s, 1s, 2s, 4s
+    retry = Retry(
+        total=5,
+        backoff_factor=1,
+        allowed_methods=['GET'])
+    while True:
+        try:
+            progress = tqdm(
+                unit='B',
+                unit_scale=True,
+                unit_divisor=1024,
+                total=file_size,
+                initial=0,
+                desc='Downloading',
+            )
+            partial_length = 0
+            if os.path.exists(
+                    temp_file_path):  # download partial, continue download
+                with open(temp_file_path, 'rb') as f:
+                    partial_length = f.seek(0, io.SEEK_END)
+                    progress.update(partial_length)
+            if partial_length > file_size:
+                break
+            get_headers['Range'] = 'bytes=%s-%s' % (partial_length,
+                                                    file_size - 1)
+            with open(temp_file_path, 'ab') as f:
+                r = requests.get(
+                    url,
+                    stream=True,
+                    headers=get_headers,
+                    cookies=cookies,
+                    timeout=60)
+                r.raise_for_status()
+                for chunk in r.iter_content(
+                        chunk_size=1024 * 1024 * 1):
+                    if chunk:  # filter out keep-alive new chunks
+                        progress.update(len(chunk))
+                        f.write(chunk)
+            progress.close()
+            break
+        except (Exception) as e:  # no matter what happens, we will retry.
+            retry = retry.increment('GET', url, error=e)
+            retry.sleep()
+
+
+def get_endpoint():
+    MODELSCOPE_URL_SCHEME = 'https://'
+    DEFAULT_MODELSCOPE_DOMAIN = 'www.modelscope.cn'
+    modelscope_domain = os.getenv('MODELSCOPE_DOMAIN',
+                                  DEFAULT_MODELSCOPE_DOMAIN)
+    return MODELSCOPE_URL_SCHEME + modelscope_domain
+
+
+def get_file_download_url(model_id: str, file_path: str, revision: str):
+    """Format file download url according to `model_id`, `revision` and `file_path`.
+    e.g., Given `model_id=john/bert`, `revision=master`, `file_path=README.md`,
+    the resulting download url is: https://modelscope.cn/api/v1/models/john/bert/repo?Revision=master&FilePath=README.md
+
+    Args:
+        model_id (str): The model_id.
+        file_path (str): File path.
+        revision (str): File revision.
+
+    Returns:
+        str: The file url.
+    """
+    file_path = urllib.parse.quote_plus(file_path)
+    revision = urllib.parse.quote_plus(revision)
+    download_url_template = '{endpoint}/api/v1/models/{model_id}/repo?Revision={revision}&FilePath={file_path}'
+    return download_url_template.format(
+        endpoint=get_endpoint(),
+        model_id=model_id,
+        revision=revision,
+        file_path=file_path,
+    )
+
+
+def download_from_modelscope(model_id, origin_file_path, local_dir):
+    os.makedirs(local_dir, exist_ok=True)
+    if os.path.basename(origin_file_path) in os.listdir(local_dir):
+        print(f"{os.path.basename(origin_file_path)} is already in {local_dir}.")
+        return
+    else:
+        print(f"Start downloading {os.path.join(local_dir, os.path.basename(origin_file_path))}")
+    headers = {'user-agent': ModelScopeConfig.get_user_agent(user_agent=None)}
+    cookies = ModelScopeConfig.get_cookies()
+    url = get_file_download_url(model_id=model_id, file_path=origin_file_path, revision="master")
+    modelscope_http_get_model_file(
+        url,
+        local_dir,
+        os.path.basename(origin_file_path),
+        file_size=0,
+        headers=headers,
+        cookies=cookies
+    )
+
+
+def download_from_huggingface(model_id, origin_file_path, local_dir):
+    os.makedirs(local_dir, exist_ok=True)
+    if os.path.basename(origin_file_path) in os.listdir(local_dir):
+        print(f"{os.path.basename(origin_file_path)} is already in {local_dir}.")
+        return
+    else:
+        print(f"Start downloading {os.path.join(local_dir, os.path.basename(origin_file_path))}")
+    hf_hub_download(model_id, origin_file_path, local_dir=local_dir)
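How the pieces compose: get_endpoint resolves the API host (honoring the MODELSCOPE_DOMAIN environment variable), get_file_download_url builds the repo-file URL on top of it, and modelscope_http_get_model_file streams the response with resume support via the Range header. A small sketch using the docstring's example values:

# Sketch: composing a ModelScope download URL (example values from the docstring).
from diffsynth.models.downloader import get_file_download_url

url = get_file_download_url(model_id="john/bert", file_path="README.md", revision="master")
print(url)
# With the default endpoint:
# https://www.modelscope.cn/api/v1/models/john/bert/repo?Revision=master&FilePath=README.md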
diffsynth/prompts/hunyuan_dit_prompter.py
CHANGED
@@ -1,14 +1,20 @@
 from .utils import Prompter
 from transformers import BertModel, T5EncoderModel, BertTokenizer, AutoTokenizer
-import warnings
+import warnings, os


 class HunyuanDiTPrompter(Prompter):
     def __init__(
         self,
-        tokenizer_path=
-        tokenizer_t5_path=
+        tokenizer_path=None,
+        tokenizer_t5_path=None
     ):
+        if tokenizer_path is None:
+            base_path = os.path.dirname(os.path.dirname(__file__))
+            tokenizer_path = os.path.join(base_path, "tokenizer_configs/hunyuan_dit/tokenizer")
+        if tokenizer_t5_path is None:
+            base_path = os.path.dirname(os.path.dirname(__file__))
+            tokenizer_t5_path = os.path.join(base_path, "tokenizer_configs/hunyuan_dit/tokenizer_t5")
         super().__init__()
         self.tokenizer = BertTokenizer.from_pretrained(tokenizer_path)
         with warnings.catch_warnings():
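The None fallback resolves the configs bundled in this commit relative to the package itself: two os.path.dirname calls climb from diffsynth/prompts/hunyuan_dit_prompter.py up to the diffsynth package root. A quick sketch of that resolution (the install path is illustrative):

# Sketch: how the default tokenizer path is derived from __file__.
import os

file = "/site-packages/diffsynth/prompts/hunyuan_dit_prompter.py"  # stand-in for __file__
base_path = os.path.dirname(os.path.dirname(file))                 # /site-packages/diffsynth
print(os.path.join(base_path, "tokenizer_configs/hunyuan_dit/tokenizer"))
# /site-packages/diffsynth/tokenizer_configs/hunyuan_dit/tokenizer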
diffsynth/prompts/sd_prompter.py
CHANGED
@@ -1,10 +1,14 @@
 from .utils import Prompter, tokenize_long_prompt
 from transformers import CLIPTokenizer
 from ..models import SDTextEncoder
+import os


 class SDPrompter(Prompter):
-    def __init__(self, tokenizer_path=
+    def __init__(self, tokenizer_path=None):
+        if tokenizer_path is None:
+            base_path = os.path.dirname(os.path.dirname(__file__))
+            tokenizer_path = os.path.join(base_path, "tokenizer_configs/stable_diffusion/tokenizer")
         super().__init__()
         self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)

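Same pattern here: with the None default, the SD prompter can now be constructed with no arguments and fall back to the bundled config (sketch, assuming the package is importable):

# Sketch: zero-argument construction now resolves to the bundled tokenizer config.
from diffsynth.prompts.sd_prompter import SDPrompter

prompter = SDPrompter()  # uses diffsynth/tokenizer_configs/stable_diffusion/tokenizer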
diffsynth/prompts/sdxl_prompter.py
CHANGED
@@ -1,15 +1,21 @@
 from .utils import Prompter, tokenize_long_prompt
 from transformers import CLIPTokenizer
 from ..models import SDXLTextEncoder, SDXLTextEncoder2
-import torch
+import torch, os


 class SDXLPrompter(Prompter):
     def __init__(
         self,
-        tokenizer_path=
-        tokenizer_2_path=
+        tokenizer_path=None,
+        tokenizer_2_path=None
     ):
+        if tokenizer_path is None:
+            base_path = os.path.dirname(os.path.dirname(__file__))
+            tokenizer_path = os.path.join(base_path, "tokenizer_configs/stable_diffusion/tokenizer")
+        if tokenizer_2_path is None:
+            base_path = os.path.dirname(os.path.dirname(__file__))
+            tokenizer_2_path = os.path.join(base_path, "tokenizer_configs/stable_diffusion_xl/tokenizer_2")
         super().__init__()
         self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)
         self.tokenizer_2 = CLIPTokenizer.from_pretrained(tokenizer_2_path)
diffsynth/prompts/utils.py
CHANGED
@@ -36,7 +36,7 @@ def tokenize_long_prompt(tokenizer, prompt):


 class BeautifulPrompt:
-    def __init__(self, tokenizer_path=
+    def __init__(self, tokenizer_path=None, model=None):
         self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
         self.model = model
         self.template = 'Instruction: Give a simple description of the image to generate a drawing prompt.\nInput: {raw_prompt}\nOutput:'
@@ -62,7 +62,7 @@ class BeautifulPrompt:


 class Translator:
-    def __init__(self, tokenizer_path=
+    def __init__(self, tokenizer_path=None, model=None):
         self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
         self.model = model

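Note the asymmetry: unlike the prompters above, BeautifulPrompt and Translator gain None defaults but no bundled fallback, so callers must still supply a real tokenizer_path and model. A sketch with placeholder arguments (the paths are hypothetical, not from this commit):

# Sketch: BeautifulPrompt still requires explicit arguments; a bare None would
# fail inside AutoTokenizer.from_pretrained.
from transformers import AutoModelForCausalLM
from diffsynth.prompts.utils import BeautifulPrompt

model = AutoModelForCausalLM.from_pretrained("path/to/prompt-refiner")  # placeholder path
bp = BeautifulPrompt(tokenizer_path="path/to/prompt-refiner", model=model)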
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "name_or_path": "hfl/chinese-roberta-wwm-ext",
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "special_tokens_map_file": "/home/chenweifeng/.cache/huggingface/hub/models--hfl--chinese-roberta-wwm-ext/snapshots/5c58d0b8ec1d9014354d691c538661bf00bfdb44/special_tokens_map.json",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]",
+  "model_max_length": 77
+}
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab.txt
ADDED
The diff for this file is too large to render.
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer/vocab_org.txt
ADDED
The diff for this file is too large to render.
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "_name_or_path": "/home/patrick/t5/mt5-xl",
+  "architectures": [
+    "MT5ForConditionalGeneration"
+  ],
+  "d_ff": 5120,
+  "d_kv": 64,
+  "d_model": 2048,
+  "decoder_start_token_id": 0,
+  "dropout_rate": 0.1,
+  "eos_token_id": 1,
+  "feed_forward_proj": "gated-gelu",
+  "initializer_factor": 1.0,
+  "is_encoder_decoder": true,
+  "layer_norm_epsilon": 1e-06,
+  "model_type": "mt5",
+  "num_decoder_layers": 24,
+  "num_heads": 32,
+  "num_layers": 24,
+  "output_past": true,
+  "pad_token_id": 0,
+  "relative_attention_num_buckets": 32,
+  "tie_word_embeddings": false,
+  "tokenizer_class": "T5Tokenizer",
+  "transformers_version": "4.10.0.dev0",
+  "use_cache": true,
+  "vocab_size": 250112
+}
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/spiece.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+size 4309802
diffsynth/tokenizer_configs/hunyuan_dit/tokenizer_t5/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 0, "additional_special_tokens": null, "special_tokens_map_file": "", "tokenizer_file": null, "name_or_path": "google/mt5-small", "model_max_length": 256, "legacy": true}
diffsynth/tokenizer_configs/stable_diffusion/tokenizer/merges.txt
ADDED
The diff for this file is too large to render.
diffsynth/tokenizer_configs/stable_diffusion/tokenizer/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<|endoftext|>",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diffsynth/tokenizer_configs/stable_diffusion/tokenizer/tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "add_prefix_space": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "do_lower_case": true,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "errors": "replace",
+  "model_max_length": 77,
+  "name_or_path": "openai/clip-vit-large-patch14",
+  "pad_token": "<|endoftext|>",
+  "special_tokens_map_file": "./special_tokens_map.json",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
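These files are the standard layout consumed by CLIPTokenizer.from_pretrained, so the bundled directory can also be loaded directly (sketch, run from a checkout of this repo):

# Sketch: loading the bundled CLIP tokenizer config directly.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("diffsynth/tokenizer_configs/stable_diffusion/tokenizer")
print(tokenizer.model_max_length)  # 77, per the tokenizer_config.json above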
diffsynth/tokenizer_configs/stable_diffusion/tokenizer/vocab.json
ADDED
The diff for this file is too large to render.
diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/merges.txt
ADDED
The diff for this file is too large to render.
diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "!",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/tokenizer_config.json
ADDED
@@ -0,0 +1,38 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "!",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "!",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
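One detail worth flagging: this tokenizer pads with "!" (token id 0, per added_tokens_decoder) instead of <|endoftext|>, which matches the convention used for SDXL's second (OpenCLIP-based) text encoder. A quick check (sketch, from a repo checkout):

# Sketch: the second SDXL tokenizer pads with "!" (token id 0).
from transformers import CLIPTokenizer

tokenizer_2 = CLIPTokenizer.from_pretrained("diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2")
print(tokenizer_2.pad_token, tokenizer_2.pad_token_id)  # ! 0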
diffsynth/tokenizer_configs/stable_diffusion_xl/tokenizer_2/vocab.json
ADDED
The diff for this file is too large to render.