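"""Export a Hugging Face Hub model to the OpenVINO IR format with optimum-intel
and open a pull request adding the exported files to the model repository.

This is the backend of the OpenVINO exporter Space
(https://huggingface.co/spaces/echarlaix/openvino-export).
"""
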
import argparse
import os
import shutil
from tempfile import TemporaryDirectory
from typing import List, Optional, Tuple

import torch
from huggingface_hub import CommitInfo, CommitOperationAdd, Discussion, HfApi
from huggingface_hub.file_download import repo_folder_name
from optimum.exporters.tasks import TasksManager
from optimum.intel.openvino import (
    OVModelForAudioClassification,
    OVModelForCausalLM,
    OVModelForFeatureExtraction,
    OVModelForImageClassification,
    OVModelForMaskedLM,
    OVModelForQuestionAnswering,
    OVModelForSeq2SeqLM,
    OVModelForSequenceClassification,
    OVModelForTokenClassification,
    OVStableDiffusionPipeline,
)
from optimum.intel.openvino.utils import _HEAD_TO_AUTOMODELS
from optimum.intel.utils.constant import _TASK_ALIASES

SPACES_URL = "https://huggingface.co/spaces/echarlaix/openvino-export"


def previous_pr(api: "HfApi", model_id: str, pr_title: str) -> Optional["Discussion"]:
    """Return the first open pull request on `model_id` whose title matches `pr_title`, if any."""
    try:
        discussions = api.get_repo_discussions(repo_id=model_id)
    except Exception:
        return None
    for discussion in discussions:
        if (
            discussion.status == "open"
            and discussion.is_pull_request
            and discussion.title == pr_title
        ):
            return discussion
    return None


def convert_openvino(model_id: str, task: str, folder: str) -> List["CommitOperationAdd"]:
    """Export `model_id` to the OpenVINO IR format in `folder` and return the commit operations."""
    task = _TASK_ALIASES.get(task, task)
    if task not in _HEAD_TO_AUTOMODELS:
        raise ValueError(
            f"The task '{task}' is not supported, only {list(_HEAD_TO_AUTOMODELS.keys())} tasks are supported"
        )
    if task == "text2text-generation":
        raise ValueError("Export of Seq2Seq models is currently disabled.")

    # Resolve the OVModel class corresponding to the task and export the model
    auto_model_class = eval(_HEAD_TO_AUTOMODELS[task])
    ov_model = auto_model_class.from_pretrained(model_id, export=True)
    ov_model.save_pretrained(folder)
    # Stable diffusion pipelines are not validated; for all other models, compare
    # the outputs of the exported model with those of the original model on dummy inputs
    if not isinstance(ov_model, OVStableDiffusionPipeline):
        model = TasksManager.get_model_from_task(task, model_id)
        exporter_config_class = TasksManager.get_exporter_config_constructor(
            exporter="openvino",
            model=model,
            task=task,
            model_name=model_id,
            model_type=model.config.model_type.replace("_", "-"),
        )
        openvino_config = exporter_config_class(model.config)
        inputs = openvino_config.generate_dummy_inputs(framework="pt")
        ov_outputs = ov_model(**inputs)
        outputs = model(**inputs)
        for output_name in ov_outputs:
            if isinstance(ov_outputs[output_name], torch.Tensor) and not torch.allclose(
                outputs[output_name], ov_outputs[output_name], atol=1e-3
            ):
                raise ValueError(
                    "The exported model does not have the same outputs as the original model. Export interrupted."
                )
    # Upload only the OpenVINO files (the .xml graph and .bin weights), not the whole folder
    file_names = {elem for elem in os.listdir(folder) if os.path.isfile(os.path.join(folder, elem))}
    operations = [
        CommitOperationAdd(path_in_repo=file_name, path_or_fileobj=os.path.join(folder, file_name))
        for file_name in file_names
        if "openvino" in file_name
    ]
    # For stable diffusion pipelines, also collect the OpenVINO files of each submodel
    dir_names = set(os.listdir(folder)) - file_names
    for dir_name in dir_names.intersection({"vae_encoder", "vae_decoder", "text_encoder", "unet"}):
        operations += [
            CommitOperationAdd(
                path_in_repo=os.path.join(dir_name, file_name),
                path_or_fileobj=os.path.join(folder, dir_name, file_name),
            )
            for file_name in os.listdir(os.path.join(folder, dir_name))
            if "openvino" in file_name
        ]
    return operations


def convert(
    api: "HfApi",
    model_id: str,
    task: str,
    force: bool = False,
) -> Tuple[str, Optional["CommitInfo"]]:
    """Export `model_id` to OpenVINO and open a pull request adding the exported files."""
    pr_title = "Adding OpenVINO file of this model"
    info = api.model_info(model_id)
    filenames = set(s.rfilename for s in info.siblings)
    requesting_user = api.whoami()["name"]

    if task == "auto":
        try:
            task = TasksManager.infer_task_from_model(model_id)
        except Exception as e:
            return (
                f"### Error: {e}. Please pass the task explicitly as it could not be inferred.",
                None,
            )

    with TemporaryDirectory() as d:
        folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
        os.makedirs(folder)
        new_pr = None
        try:
            pr = previous_pr(api, model_id, pr_title)
            if "openvino_model.xml" in filenames and not force:
                raise Exception(f"Model {model_id} is already converted, skipping...")
            elif pr is not None and not force:
                url = f"https://huggingface.co/{model_id}/discussions/{pr.num}"
                new_pr = pr
                raise Exception(
                    f"Model {model_id} already has an open PR, check out [{url}]({url})"
                )
            else:
                operations = convert_openvino(model_id, task, folder)
                commit_description = f"""
Beep boop I am the [OpenVINO exporter bot 🤖]({SPACES_URL}). On behalf of [{requesting_user}](https://huggingface.co/{requesting_user}), I would like to add to this repository the exported OpenVINO model.
"""
                new_pr = api.create_commit(
                    repo_id=model_id,
                    operations=operations,
                    commit_message=pr_title,
                    commit_description=commit_description,
                    create_pr=True,
                )
        finally:
            shutil.rmtree(folder)
    return "0", new_pr