init
- export.py  +51 -0
- model.ckpt +3 -0
- model.onnx +3 -0
export.py
ADDED
@@ -0,0 +1,51 @@
import argparse

import torch

from anime_aesthetic import AnimeAesthetic, model_cfgs


def export_onnx(model, img_size, path):
    import onnx
    from onnxsim import simplify

    torch.onnx.export(model,                                  # model being run
                      torch.randn(1, 3, img_size, img_size),  # model input (or a tuple for multiple inputs)
                      path,                                   # where to save the model (can be a file or file-like object)
                      export_params=True,                     # store the trained parameter weights inside the model file
                      opset_version=11,                       # the ONNX version to export the model to
                      do_constant_folding=True,               # whether to execute constant folding for optimization
                      input_names=["img"],                    # the model's input names
                      output_names=["score"],                 # the model's output names
                      verbose=True,
                      )
    # run onnx-simplifier over the exported graph and overwrite the file in place
    onnx_model = onnx.load(path)
    model_simp, check = simplify(onnx_model)
    assert check, "Simplified ONNX model could not be validated"
    onnx.save(model_simp, path)
    print('finished exporting onnx')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # model args
    parser.add_argument(
        "--cfg",
        type=str,
        default="tiny",
        choices=list(model_cfgs.keys()),
        help="model config",
    )
    parser.add_argument('--ckpt', type=str, default='lightning_logs/version_11/checkpoints/last.ckpt',
                        help='model checkpoint path')
    parser.add_argument('--out', type=str, default='model.onnx',
                        help='output path')
    parser.add_argument('--to', type=str, default='onnx', choices=["onnx"],
                        help='export format (onnx)')
    parser.add_argument('--img-size', type=int, default=768,
                        help='input image size')
    opt = parser.parse_args()
    print(opt)

    # load the Lightning checkpoint on CPU; strict=False tolerates key mismatches in the state dict
    model = AnimeAesthetic.load_from_checkpoint(opt.ckpt, cfg=opt.cfg, ema_decay=0.999, map_location="cpu", strict=False)
    model = model.eval()
    if opt.to == "onnx":
        export_onnx(model, opt.img_size, opt.out)
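export.py traces the Lightning module with a fixed 1x3x{img-size}x{img-size} dummy input, constant-folds the graph, then runs onnx-simplifier and overwrites the file with the simplified graph. A quick way to sanity-check the result is to compare the ONNX output against the eager PyTorch output on the same random input. The helper below is a sketch, not part of the commit: it assumes onnxruntime is installed and that AnimeAesthetic's forward takes the image tensor directly, as the torch.onnx.export call implies.

import numpy as np
import onnxruntime as ort
import torch

def check_parity(model, onnx_path, img_size=768, atol=1e-3):
    # same shape as the dummy input used during export
    x = torch.randn(1, 3, img_size, img_size)
    with torch.no_grad():
        ref = model(x).cpu().numpy()
    sess = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    (out,) = sess.run(["score"], {"img": x.numpy()})
    print("max abs diff:", np.abs(ref - out).max())
    np.testing.assert_allclose(ref, out, atol=atol)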
model.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d937e4d5038a911e2a62650c6b614ec14d92c10be03ce1e2f3976b3a0cf6afa
size 446141821
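model.ckpt is a Git LFS pointer: the repository stores only the sha256 oid and byte size, and git lfs pull fetches the actual ~446 MB Lightning checkpoint. Once fetched, it can be loaded the same way export.py does; the snippet below is a sketch, and the cfg value ("tiny") and the importability of anime_aesthetic are assumptions.

import torch
from anime_aesthetic import AnimeAesthetic

# assumes git lfs pull has replaced the pointer with the real checkpoint
model = AnimeAesthetic.load_from_checkpoint(
    "model.ckpt", cfg="tiny", ema_decay=0.999, map_location="cpu", strict=False
)
model = model.eval()
with torch.no_grad():
    score = model(torch.randn(1, 3, 768, 768))  # dummy image, same shape as the export input
print(score)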
model.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c002fbe0768282ba62488513c1a60d6fdba06c95a9a097d58dd046a789f76890
size 112193244
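model.onnx is likewise an LFS pointer, here to the ~112 MB simplified graph. Once fetched, inference needs only onnxruntime: the exported graph takes a single "img" tensor of shape 1x3xHxW and returns "score". The sketch below assumes a 768x768 input (the export default) and a simple RGB / [0, 1] / NCHW preprocessing for illustration; the authoritative preprocessing lives in the training repo's anime_aesthetic module.

import numpy as np
import onnxruntime as ort
from PIL import Image

def score_image(image_path, onnx_path="model.onnx", img_size=768):
    # assumed preprocessing: RGB, square resize, floats in [0, 1], NCHW layout
    img = Image.open(image_path).convert("RGB").resize((img_size, img_size))
    x = np.asarray(img, dtype=np.float32) / 255.0
    x = x.transpose(2, 0, 1)[None]  # HWC -> 1x3xHxW
    sess = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    (score,) = sess.run(["score"], {"img": x})
    return float(np.ravel(score)[0])

print(score_image("example.jpg"))  # example.jpg is a hypothetical input image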