IlyasMoutawwakil committed
Commit b516823 • 1 Parent(s): 47984ee
Files changed (3):
  1. README.md +0 -2
  2. app.py +30 -33
  3. requirements.txt +1 -1
README.md CHANGED
@@ -9,8 +9,6 @@ app_file: app.py
 hf_oauth: true
 hf_oauth_scopes:
 - read-repos
-- write-repos
-- manage-repos
 pinned: false
 license: apache-2.0
 ---
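Dropping the `write-repos` and `manage-repos` scopes matches the app.py change below: the visitor's OAuth token is now only used for read-side calls, while everything that writes to the Hub goes through the Space's own token. Below is a minimal sketch, not part of this commit and with illustrative names, of how a Space consumes the remaining `read-repos` scope through Gradio's built-in OAuth support:

```python
from typing import Optional

import gradio as gr
from huggingface_hub import whoami


def show_user(oauth_token: Optional[gr.OAuthToken] = None):
    # With only `read-repos`, the user's token is enough for read-only calls
    # such as `whoami`; any write has to use the Space's own secret token.
    if oauth_token is None:
        raise gr.Error("Please log in to the Hub first.")
    return f"Logged in as {whoami(oauth_token.token)['name']}"


with gr.Blocks() as demo:
    gr.LoginButton()  # rendered because the README sets `hf_oauth: true`
    name_box = gr.Textbox(label="Hub user")
    gr.Button("Who am I?").click(show_user, inputs=None, outputs=name_box)

demo.launch()
```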
app.py CHANGED
@@ -11,7 +11,7 @@ from config_store import (
 )
 
 import gradio as gr
-from huggingface_hub import create_repo, whoami
+from huggingface_hub import whoami, login, logout
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
 from optimum_benchmark.launchers.device_isolation_utils import *  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
@@ -19,8 +19,8 @@ from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
 from optimum_benchmark import (
     Benchmark,
     BenchmarkConfig,
-    ProcessConfig,
     InferenceConfig,
+    ProcessConfig,
     PyTorchConfig,
     OVConfig,
 )
@@ -32,24 +32,12 @@ DEVICE = "cpu"
 LAUNCHER = "process"
 SCENARIO = "inference"
 BACKENDS = ["pytorch", "openvino"]
+BENCHMARKS_HF_TOKEN = os.getenv("BENCHMARKS_HF_TOKEN")
+BENCHMARKS_REPO_ID = "optimum-benchmark/OpenVINO-Benchmarks"
 TASKS = set(TASKS_TO_OVMODEL.keys()) & set(TASKS_TO_MODEL_LOADERS.keys())
 
 
-def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken] = None):
-    if oauth_token.token is None or oauth_token.token == "":
-        raise gr.Error("Please login to be able to run the benchmark.")
-
-    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
-    name = whoami(oauth_token.token)["name"]
-    repo_id = f"{name}/benchmarks"
-    token = oauth_token.token
-
-    try:
-        create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
-        gr.Info(f"📂 Created dataset repository {repo_id} on the Hub.")
-    except Exception:
-        raise gr.Error(f"Error while creating dataset repository {repo_id} on the Hub.")
-
+def parse_configs(inputs):
     configs = {"process": {}, "inference": {}, "pytorch": {}, "openvino": {}}
 
     for key, value in inputs.items():
@@ -77,46 +65,55 @@ def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken] = None):
         task=task, model=model, device=DEVICE, **configs["openvino"]
     )
 
-    outputs = {
-        "pytorch": "Running benchmark for PyTorch backend",
-        "openvino": "Running benchmark for OpenVINO backend",
-    }
+    return configs
 
+
+def run_benchmark(inputs, oauth_token: Optional[gr.OAuthToken]):
+    if oauth_token.token is None or oauth_token.token == "":
+        raise gr.Error("Please login to be able to run the benchmark.")
+
+    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
+    use_name = whoami(oauth_token.token)["name"]
+    folder = f"{use_name}/{timestamp}"
+
+    gr.Info(f"📩 Benchmark will be saved under {BENCHMARKS_REPO_ID}/{folder}")
+
+    outputs = {backend: "Running..." for backend in BACKENDS}
     yield tuple(outputs[b] for b in BACKENDS)
+    configs = parse_configs(inputs)
 
     for backend in BACKENDS:
        try:
-            benchmark_name = f"{timestamp}/{backend}"
+            login(token=oauth_token.token)
+            benchmark_name = f"{folder}/{backend}"
             benchmark_config = BenchmarkConfig(
                 name=benchmark_name,
                 backend=configs[backend],
                 launcher=configs[LAUNCHER],
                 scenario=configs[SCENARIO],
             )
+            benchmark_report = Benchmark.launch(benchmark_config)
+            logout()
+
             benchmark_config.push_to_hub(
-                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
+                repo_id=BENCHMARKS_REPO_ID,
+                subfolder=benchmark_name,
+                token=BENCHMARKS_HF_TOKEN,
             )
-            benchmark_report = Benchmark.launch(benchmark_config)
             benchmark_report.push_to_hub(
-                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
-            )
-            benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
-            benchmark.push_to_hub(
-                repo_id=repo_id, subfolder=benchmark_name, token=oauth_token.token
+                repo_id=BENCHMARKS_REPO_ID,
+                subfolder=benchmark_name,
+                token=BENCHMARKS_HF_TOKEN,
             )
 
         except Exception:
             outputs[backend] = f"\n```python-traceback\n{traceback.format_exc()}```\n"
-
             yield tuple(outputs[b] for b in BACKENDS)
-
             gr.Info(f"❌ Error while running benchmark for {backend} backend.")
 
         else:
             outputs[backend] = f"\n{benchmark_report.to_markdown_text()}\n"
-
             yield tuple(outputs[b] for b in BACKENDS)
-
             gr.Info(f"✅ Benchmark for {backend} backend ran successfully.")
 
 
requirements.txt CHANGED
@@ -1,2 +1,2 @@
 gradio_huggingfacehub_search
-optimum-benchmark[openvino]@git+https://github.com/huggingface/optimum-benchmark.git@markdown-report
+optimum-benchmark[openvino]
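The dependency moves from the `markdown-report` git branch to the plain PyPI release, presumably because the markdown-report changes (e.g. `to_markdown_text()`, used in app.py) have shipped in a release. An illustrative check, not part of the commit, of which release the Space ends up with once the git pin is dropped:

```python
# Print the installed optimum-benchmark distribution version.
from importlib.metadata import version

print(version("optimum-benchmark"))
```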