echarlaix (HF staff) committed
Commit b4d8dc7 • 1 parent: 3d43c74

update space

Files changed (1): app.py (+12 -7)
app.py CHANGED
@@ -36,7 +36,10 @@ from optimum.intel import (
 
 def export(model_id: str, private_repo: bool, oauth_token: gr.OAuthToken):
     if oauth_token.token is None:
-        raise ValueError("You must be logged in to use this space")
+        return "You must be logged in to use this space"
+
+    if not model_id:
+        return f"### Invalid input 🐞 Please specify a model name, got {model_id}"
 
     model_name = model_id.split("/")[-1]
     username = whoami(oauth_token.token)["name"]
@@ -45,12 +48,10 @@ def export(model_id: str, private_repo: bool, oauth_token: gr.OAuthToken):
     task = TasksManager.infer_task_from_model(model_id)
 
     if task not in _HEAD_TO_AUTOMODELS:
-        raise ValueError(
-            f"The task '{task}' is not supported, only {_HEAD_TO_AUTOMODELS.keys()} tasks are supported"
-        )
+        return f"The task '{task}' is not supported, only {_HEAD_TO_AUTOMODELS.keys()} tasks are supported"
 
     if task == "text2text-generation":
-        raise ValueError("Export of Seq2Seq models is currently disabled.")
+        return "Export of Seq2Seq models is currently disabled"
 
     auto_model_class = _HEAD_TO_AUTOMODELS[task]
     ov_files = _find_files_matching_pattern(
@@ -60,7 +61,7 @@ def export(model_id: str, private_repo: bool, oauth_token: gr.OAuthToken):
     )
 
     if len(ov_files) > 0:
-        raise Exception(f"Model {model_id} is already converted, skipping..")
+        return f"Model {model_id} is already converted, skipping.."
 
     api = HfApi(token=oauth_token.token)
 
@@ -99,7 +100,7 @@ def export(model_id: str, private_repo: bool, oauth_token: gr.OAuthToken):
                 repo_id=new_repo_id,
             )
         except Exception as e:
-            raise Exception(f"Error uploading file {file_path}: {e}")
+            return f"Error uploading file {file_path}: {e}"
 
         try:
             card = ModelCard.load(model_id, token=oauth_token.token)
@@ -140,6 +141,8 @@ def export(model_id: str, private_repo: bool, oauth_token: gr.OAuthToken):
             repo_id=new_repo_id,
         )
         return f"This model was successfully exported, find it under your repo {new_repo_url}"
+    except Exception as e:
+        return f"### Error: {e}"
     finally:
         shutil.rmtree(folder, ignore_errors=True)
 
@@ -147,6 +150,8 @@ DESCRIPTION = """
 This Space uses [Optimum Intel](https://huggingface.co/docs/optimum/main/en/intel/openvino/export) to automatically export a model from the Hub to the [OpenVINO format](https://docs.openvino.ai/2024/documentation/openvino-ir-format.html).
 
 The resulting model will then be pushed under your HF user namespace.
+
+The list of the supported architectures can be found in the [documentation](https://huggingface.co/docs/optimum/main/en/intel/openvino/models).
 """
 
 model_id = HuggingfaceHubSearch(
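
The recurring change in this diff, returning message strings instead of raising, matches a common Gradio pattern: a string returned from the handler is rendered in the output component, whereas an uncaught exception only surfaces as a generic error toast. A minimal sketch of that pattern, with `convert` as a hypothetical stand-in for the Space's `export` function:

```python
# Sketch of the return-instead-of-raise pattern this commit adopts;
# `convert` is a hypothetical stand-in, not the Space's actual handler.
import gradio as gr

def convert(model_id: str) -> str:
    if not model_id:
        # Rendered as Markdown in the output component instead of crashing
        return "### Invalid input 🐞 Please specify a model name"
    return f"Conversion of {model_id} would start here"

demo = gr.Interface(fn=convert, inputs=gr.Textbox(label="Model id"), outputs=gr.Markdown())

if __name__ == "__main__":
    demo.launch()
```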
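The hunks around old lines 99 and 140 sit inside the repo-creation and upload flow (`HfApi`, `upload_file`). For rough orientation only, here is a generic `huggingface_hub` sketch of such a flow; it is not the Space's hidden code, and the token, repo id, and folder are illustrative:

```python
# Generic huggingface_hub upload flow, approximating (not reproducing)
# the Space's unshown lines; all identifiers below are placeholders.
from pathlib import Path
from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # illustrative token placeholder
new_repo_id = "username/model-openvino"  # illustrative target repo

api.create_repo(repo_id=new_repo_id, private=True, exist_ok=True)

folder = Path("ov_model")
for file_path in folder.iterdir():
    if file_path.is_file():
        api.upload_file(
            path_or_fileobj=file_path,
            path_in_repo=file_path.name,
            repo_id=new_repo_id,
        )
```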
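The DESCRIPTION points to the Optimum Intel export docs; for reference, the conversion this Space automates can also be run directly with the documented API. A minimal sketch, assuming a sequence-classification checkpoint (the model id is only an example):

```python
# Minimal sketch of the conversion this Space wraps, using the public
# Optimum Intel API; the model id is illustrative, not tied to this Space.
from optimum.intel import OVModelForSequenceClassification
from transformers import AutoTokenizer

model_id = "distilbert-base-uncased-finetuned-sst-2-english"

# export=True converts the PyTorch weights to OpenVINO IR on load
model = OVModelForSequenceClassification.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Writes openvino_model.xml / openvino_model.bin alongside the config
model.save_pretrained("ov_model")
tokenizer.save_pretrained("ov_model")
```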