echarlaix (HF staff) committed on
Commit 097130c • 1 Parent(s): b4d8dc7

add support of gated and private models

Files changed (2)
  1. app.py +143 -110
  2. requirements.txt +1 -2
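
The gist of the change: every Hub interaction in the Space now receives the user's OAuth token, so gated and private repositories resolve instead of failing for anonymous access. A minimal sketch of that pattern on its own, assuming a valid token (the token string and repo id below are placeholders, not from the diff):

```python
from huggingface_hub import HfApi, whoami

token = "hf_..."                  # placeholder; the Space reads this from gr.OAuthToken
username = whoami(token)["name"]  # resolves the logged-in user, fails without a valid token
api = HfApi(token=token)          # authenticated client: gated/private repos become visible
api.snapshot_download(repo_id="org/private-model", local_dir="./tmp", allow_patterns=["*.json"])
```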
app.py CHANGED
@@ -10,9 +10,7 @@ from pathlib import Path
 from tempfile import TemporaryDirectory
 
 from huggingface_hub.file_download import repo_folder_name
-from optimum.exporters.tasks import TasksManager
 from optimum.intel.utils.constant import _TASK_ALIASES
-from optimum.intel.openvino.utils import _HEAD_TO_AUTOMODELS
 from optimum.exporters import TasksManager
 
 from optimum.intel.utils.modeling_utils import _find_files_matching_pattern
@@ -32,119 +30,148 @@ from optimum.intel import (
     OVModelForPix2Struct,
     OVWeightQuantizationConfig,
 )
-
-
-def export(model_id: str, private_repo: bool, oauth_token: gr.OAuthToken):
+from diffusers import ConfigMixin
+
+_HEAD_TO_AUTOMODELS = {
+    "feature-extraction": "OVModelForFeatureExtraction",
+    "fill-mask": "OVModelForMaskedLM",
+    "text-generation": "OVModelForCausalLM",
+    "text-classification": "OVModelForSequenceClassification",
+    "token-classification": "OVModelForTokenClassification",
+    "question-answering": "OVModelForQuestionAnswering",
+    "image-classification": "OVModelForImageClassification",
+    "audio-classification": "OVModelForAudioClassification",
+    "stable-diffusion": "OVStableDiffusionPipeline",
+    "stable-diffusion-xl": "OVStableDiffusionXLPipeline",
+    "latent-consistency": "OVLatentConsistencyModelPipeline",
+}
+
+
+def export(model_id: str, private_repo: bool, overwritte: bool, oauth_token: gr.OAuthToken):
     if oauth_token.token is None:
         return "You must be logged in to use this space"
 
     if not model_id:
-        return "### Invalid input 🐞 Please specify a model name, got {model_id}"
-
-    model_name = model_id.split("/")[-1]
-    username = whoami(oauth_token.token)["name"]
-    new_repo_id = f"{username}/{model_name}-openvino"
-    # task = TasksManager.infer_task_from_model(model_id, token=oauth_token.token)
-    task = TasksManager.infer_task_from_model(model_id)
-
-    if task not in _HEAD_TO_AUTOMODELS:
-        return f"The task '{task}' is not supported, only {_HEAD_TO_AUTOMODELS.keys()} tasks are supported"
-
-    if task == "text2text-generation":
-        return "Export of Seq2Seq models is currently disabled"
-
-    auto_model_class = _HEAD_TO_AUTOMODELS[task]
-    ov_files = _find_files_matching_pattern(
-        model_id,
-        pattern=r"(.*)?openvino(.*)?\_model.xml",
-        use_auth_token=oauth_token.token,
-    )
-
-    if len(ov_files) > 0:
-        return f"Model {model_id} is already converted, skipping.."
-
-    api = HfApi(token=oauth_token.token)
-
-    with TemporaryDirectory() as d:
-        folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
-        os.makedirs(folder)
-        try:
-            api.snapshot_download(repo_id=model_id, local_dir=folder, allow_patterns=["*.json"])
-            ov_model = eval(auto_model_class).from_pretrained(model_id, export=True, cache_dir=folder, token=oauth_token.token)
-            ov_model.save_pretrained(folder)
-            new_repo_url = api.create_repo(repo_id=new_repo_id, exist_ok=True, private=private_repo)
-            new_repo_id = new_repo_url.repo_id
-            print("Repo created successfully!", new_repo_url)
-
-            folder = Path(folder)
-            for dir_name in (
-                "",
-                "vae_encoder",
-                "vae_decoder",
-                "text_encoder",
-                "text_encoder_2",
-                "unet",
-                "tokenizer",
-                "tokenizer_2",
-                "scheduler",
-                "feature_extractor",
-            ):
-                if not (folder / dir_name).is_dir():
-                    continue
-                for file_path in (folder / dir_name).iterdir():
-                    if file_path.is_file():
-                        try:
-                            api.upload_file(
-                                path_or_fileobj=file_path,
-                                path_in_repo=os.path.join(dir_name, file_path.name),
-                                repo_id=new_repo_id,
-                            )
-                        except Exception as e:
-                            return f"Error uploading file {file_path}: {e}"
-
+        return f"### Invalid input 🐞 Please specify a model name, got {model_id}"
+
+    try:
+        model_name = model_id.split("/")[-1]
+        username = whoami(oauth_token.token)["name"]
+        new_repo_id = f"{username}/{model_name}-openvino"
+        library_name = TasksManager.infer_library_from_model(model_id, token=oauth_token.token)
+
+        if library_name == "diffusers":
+            ConfigMixin.config_name = "model_index.json"
+            class_name = ConfigMixin.load_config(model_id, token=oauth_token.token)["_class_name"].lower()
+            if "xl" in class_name:
+                task = "stable-diffusion-xl"
+            elif "consistency" in class_name:
+                task = "latent-consistency"
+            else:
+                task = "stable-diffusion"
+        else:
+            task = TasksManager.infer_task_from_model(model_id, token=oauth_token.token)
+
+        if task == "text2text-generation":
+            return "Export of Seq2Seq models is currently disabled"
+
+        if task not in _HEAD_TO_AUTOMODELS:
+            return f"The task '{task}' is not supported, only {_HEAD_TO_AUTOMODELS.keys()} tasks are supported"
+
+        auto_model_class = _HEAD_TO_AUTOMODELS[task]
+        ov_files = _find_files_matching_pattern(
+            model_id,
+            pattern=r"(.*)?openvino(.*)?\_model.xml",
+            use_auth_token=oauth_token.token,
+        )
+
+        if len(ov_files) > 0:
+            return f"Model {model_id} is already converted, skipping.."
+
+        api = HfApi(token=oauth_token.token)
+        if api.repo_exists(new_repo_id) and not overwritte:
+            return f"Model {new_repo_id} already exist, please set overwritte=True to push on an existing repo"
+
+        with TemporaryDirectory() as d:
+            folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
+            os.makedirs(folder)
             try:
-                card = ModelCard.load(model_id, token=oauth_token.token)
-            except:
-                card = ModelCard("")
-
-            if card.data.tags is None:
-                card.data.tags = []
-            card.data.tags.append("openvino")
-            card.data.base_model = model_id
-            card.text = dedent(
-                f"""
-                This model was converted to OpenVINO from [`{model_id}`](https://huggingface.co/{model_id}) using [optimum-intel](https://github.com/huggingface/optimum-intel)
-                via the [export](https://huggingface.co/spaces/echarlaix/openvino-export) space.
-
-                First make sure you have optimum-intel installed:
-
-                ```bash
-                pip install optimum[openvino]
-                ```
-
-                To load your model you can do as follows:
-
-                ```python
-                from optimum.intel import {auto_model_class}
-
-                model_id = "{new_repo_id}"
-                model = {auto_model_class}.from_pretrained(model_id)
-                ```
-                """
-            )
-            card_path = os.path.join(folder, "README.md")
-            card.save(card_path)
-
-            api.upload_file(
-                path_or_fileobj=card_path,
-                path_in_repo="README.md",
-                repo_id=new_repo_id,
-            )
-            return f"This model was successfully exported, find it under your repo {new_repo_url}'"
-        except Exception as e:
-            return f"### Error: {e}"
-        finally:
-            shutil.rmtree(folder, ignore_errors=True)
+                api.snapshot_download(repo_id=model_id, local_dir=folder, allow_patterns=["*.json"])
+                ov_model = eval(auto_model_class).from_pretrained(model_id, export=True, cache_dir=folder, token=oauth_token.token)
+                ov_model.save_pretrained(folder)
+                new_repo_url = api.create_repo(repo_id=new_repo_id, exist_ok=True, private=private_repo)
+                new_repo_id = new_repo_url.repo_id
+                print("Repo created successfully!", new_repo_url)
+
+                folder = Path(folder)
+                for dir_name in (
+                    "",
+                    "vae_encoder",
+                    "vae_decoder",
+                    "text_encoder",
+                    "text_encoder_2",
+                    "unet",
+                    "tokenizer",
+                    "tokenizer_2",
+                    "scheduler",
+                    "feature_extractor",
+                ):
+                    if not (folder / dir_name).is_dir():
+                        continue
+                    for file_path in (folder / dir_name).iterdir():
+                        if file_path.is_file():
+                            try:
+                                api.upload_file(
+                                    path_or_fileobj=file_path,
+                                    path_in_repo=os.path.join(dir_name, file_path.name),
+                                    repo_id=new_repo_id,
+                                )
+                            except Exception as e:
+                                return f"Error uploading file {file_path}: {e}"
+
+                try:
+                    card = ModelCard.load(model_id, token=oauth_token.token)
+                except:
+                    card = ModelCard("")
+
+                if card.data.tags is None:
+                    card.data.tags = []
+                card.data.tags.append("openvino")
+                card.data.base_model = model_id
+                card.text = dedent(
+                    f"""
+                    This model was converted to OpenVINO from [`{model_id}`](https://huggingface.co/{model_id}) using [optimum-intel](https://github.com/huggingface/optimum-intel)
+                    via the [export](https://huggingface.co/spaces/echarlaix/openvino-export) space.
+
+                    First make sure you have optimum-intel installed:
+
+                    ```bash
+                    pip install optimum[openvino]
+                    ```
+
+                    To load your model you can do as follows:
+
+                    ```python
+                    from optimum.intel import {auto_model_class}
+
+                    model_id = "{new_repo_id}"
+                    model = {auto_model_class}.from_pretrained(model_id)
+                    ```
+                    """
+                )
+                card_path = os.path.join(folder, "README.md")
+                card.save(card_path)
+
+                api.upload_file(
+                    path_or_fileobj=card_path,
+                    path_in_repo="README.md",
+                    repo_id=new_repo_id,
+                )
+                return f"This model was successfully exported, find it under your repo {new_repo_url}'"
+            finally:
+                shutil.rmtree(folder, ignore_errors=True)
+    except Exception as e:
+        return f"### Error: {e}"
 
 DESCRIPTION = """
 This Space uses [Optimum Intel](https://huggingface.co/docs/optimum/main/en/intel/openvino/export) to automatically export a model from the Hub to the [OpenVINO format](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html).
@@ -164,11 +191,17 @@ private_repo = gr.Checkbox(
     label="Private Repo",
     info="Create a private repo under your username",
 )
+overwritte = gr.Checkbox(
+    value=False,
+    label="Overwrite repo content",
+    info="Push files on existing repo potentially overwriting existing files",
+)
 interface = gr.Interface(
     fn=export,
     inputs=[
         model_id,
         private_repo,
+        overwritte,
     ],
     outputs=[
         gr.Markdown(label="output"),
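
A note on the diffusers branch above: diffusers repos carry no single task tag, so the new code points ConfigMixin at model_index.json and keys the task off the pipeline's `_class_name`. A standalone sketch of that detection, assuming a public pipeline repo (the model id is illustrative):

```python
from diffusers import ConfigMixin

ConfigMixin.config_name = "model_index.json"  # make the config loader read the pipeline index
class_name = ConfigMixin.load_config("stabilityai/stable-diffusion-xl-base-1.0")["_class_name"].lower()

if "xl" in class_name:
    task = "stable-diffusion-xl"
elif "consistency" in class_name:
    task = "latent-consistency"
else:
    task = "stable-diffusion"

print(task)  # "stable-diffusion-xl" for this example id
```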
 
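Loading an export that was pushed to a private repo mirrors the model card snippet, except the same token must be passed again; from_pretrained accepts it just as in the export call above. A hypothetical usage (repo id and token are placeholders):

```python
from optimum.intel import OVModelForCausalLM

model = OVModelForCausalLM.from_pretrained("username/model-openvino", token="hf_...")
```
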
requirements.txt CHANGED
@@ -3,7 +3,6 @@ gradio[oauth]>=4.37.2
 gradio_huggingfacehub_search==0.0.6
 transformers==4.42.4
 diffusers==0.29.1
-optimum==1.21.2
-optimum-intel==1.18.1
+git+https://github.com/huggingface/optimum-intel.git
 openvino
 nncf
 
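With optimum-intel now pinned to the main branch rather than a release, a quick sanity check of which version actually resolved can help when debugging the Space (a sketch, nothing Space-specific):

```python
from importlib.metadata import version

print(version("optimum-intel"))  # prints a dev version string when installed from git
```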