The full dataset viewer is not available; only a preview of the rows is shown.
The dataset generation failed
Error code:   DatasetGenerationError
Exception:    ArrowNotImplementedError
Message:      Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
Traceback:    Traceback (most recent call last):
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
                  writer.write_table(table)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 620, in write_table
                  self._build_writer(inferred_schema=pa_table.schema)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
                  self.pa_writer = self._WRITER_CLASS(self.stream, schema)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
                  self.writer = _parquet.ParquetWriter(
                File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
                File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
                File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
              pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
              
              During handling of the above exception, another exception occurred:
              
              Traceback (most recent call last):
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1886, in _prepare_split_single
                  num_examples, num_bytes = writer.finalize()
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 639, in finalize
                  self._build_writer(self.schema)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
                  self.pa_writer = self._WRITER_CLASS(self.stream, schema)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
                  self.writer = _parquet.ParquetWriter(
                File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
                File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
                File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
              pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
              
              The above exception was the direct cause of the following exception:
              
              Traceback (most recent call last):
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1417, in compute_config_parquet_and_info_response
                  parquet_operations = convert_to_parquet(builder)
                File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1049, in convert_to_parquet
                  builder.download_and_prepare(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare
                  self._download_and_prepare(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare
                  self._prepare_split(split_generator, **prepare_split_kwargs)
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
                  for job_id, done, content in self._prepare_split_single(
                File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single
                  raise DatasetGenerationError("An error occurred while generating the dataset") from e
              datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset

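The failure is a known PyArrow limitation: a column such as `model_kwargs` that contains only empty dicts is inferred as a struct type with zero child fields, which the Parquet writer cannot encode. The sketch below is an illustration, not the Hub worker's code; it assumes only that pyarrow is installed, and the file names and the JSON-string workaround are hypothetical choices. It reproduces the error and shows one way a dataset author could sidestep it before regenerating Parquet files.

```python
# Minimal sketch of the failure and one possible workaround (illustrative only).
import json

import pyarrow as pa
import pyarrow.parquet as pq

# A column holding only empty dicts is inferred as struct<> with no child
# fields, which Parquet cannot represent.
table = pa.table({"model_kwargs": pa.array([{}, {}], type=pa.struct([]))})

try:
    pq.write_table(table, "broken.parquet")
except pa.ArrowNotImplementedError as exc:
    print(exc)  # Cannot write struct type 'model_kwargs' with no child field ...

# One workaround: store the always-empty dicts as JSON text so Arrow sees
# plain strings instead of an empty struct.
fixed = table.set_column(
    0,
    "model_kwargs",
    pa.array([json.dumps(v) for v in table.column("model_kwargs").to_pylist()]),
)
pq.write_table(fixed, "fixed.parquet")
```

The alternative the error message itself suggests, adding a dummy child field to the otherwise empty struct, would sidestep the limitation just as well.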

Column        Type
config        dict
report        dict
name          string
backend       dict
scenario      dict
launcher      dict
environment   dict
print_report  bool
log_report    bool
overall       dict
warmup        dict
train         dict
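Each populated preview row pairs a benchmark `config` with its `report`; the `report` nests per-phase sections (`overall`, `warmup`, `train`) holding memory, latency, throughput, and energy metrics. As a minimal, illustrative sketch (plain Python, no extra dependencies; the dict is trimmed to just the fields read, with the numbers copied from the first preview row), this is how such a `report` cell could be summarized:

```python
# Illustrative only: summarize one "report" cell from the preview.
# Numbers are copied from the first preview row
# (fill-mask, google-bert/bert-base-uncased); the dict is trimmed.
report = {
    "overall": {"latency": {"unit": "s", "mean": 0.5702715421999869},
                "throughput": {"unit": "samples/s", "value": 17.535505912537943}},
    "warmup":  {"latency": {"unit": "s", "mean": 0.6077608514999895},
                "throughput": {"unit": "samples/s", "value": 6.581536125809626}},
    "train":   {"latency": {"unit": "s", "mean": 0.5452786693333186},
                "throughput": {"unit": "samples/s", "value": 11.003547979120219}},
}

for phase, metrics in report.items():
    latency = metrics["latency"]
    throughput = metrics["throughput"]
    print(f"{phase:>7}: mean latency {latency['mean']:.4f} {latency['unit']}, "
          f"throughput {throughput['value']:.2f} {throughput['unit']}")
```

In these runs the warmup phase covers the first two of the five steps and the train phase the remaining three, which is why their mean latencies bracket the overall mean.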
{ "name": "cpu_training_transformers_fill-mask_google-bert/bert-base-uncased", "backend": { "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "fill-mask", "library": "transformers", "model_type": "bert", "model": "google-bert/bert-base-uncased", "processor": "google-bert/bert-base-uncased", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }, "print_report": true, "log_report": true }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2498.625536, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.6646431359999951, 0.5508785669999838, 0.5433905630000027, 0.548464628999966, 0.543980815999987 ], "count": 5, "total": 2.8513577109999346, "mean": 0.5702715421999869, "p50": 0.548464628999966, "p90": 0.6191373083999906, "p95": 0.6418902221999928, "p99": 0.6600925532399947, "stdev": 0.04726820494507546, "stdev_": 8.28871887289426 }, "throughput": { "unit": "samples/s", "value": 17.535505912537943 }, "energy": { "unit": "kWh", "cpu": 0.00011802738651666497, "ram": 0.000004933497438480545, "gpu": 0, "total": 0.00012296088395514552 }, "efficiency": { "unit": "samples/kWh", "value": 81326.67624321785 } }, "warmup": { "memory": { "unit": "MB", "max_ram": 2498.625536, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.6646431359999951, 0.5508785669999838 ], "count": 2, "total": 1.215521702999979, "mean": 0.6077608514999895, "p50": 0.6077608514999895, "p90": 0.653266679099994, "p95": 0.6589549075499945, "p99": 0.6635054903099951, "stdev": 0.05688228450000565, "stdev_": 9.359320258884203 }, "throughput": { "unit": "samples/s", "value": 6.581536125809626 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2498.625536, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.5433905630000027, 0.548464628999966, 0.543980815999987 ], "count": 3, "total": 1.6358360079999557, "mean": 0.5452786693333186, "p50": 0.543980815999987, "p90": 0.5475678663999701, "p95": 0.548016247699968, "p99": 0.5483749527399664, "stdev": 0.002265664567571828, "stdev_": 0.41550581289782124 }, "throughput": { "unit": "samples/s", "value": 11.003547979120219 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
null
null
cpu_training_transformers_fill-mask_google-bert/bert-base-uncased
{ "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "fill-mask", "library": "transformers", "model_type": "bert", "model": "google-bert/bert-base-uncased", "processor": "google-bert/bert-base-uncased", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }
{ "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }
{ "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }
{ "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }
true
true
null
null
null
null
null
null
null
null
null
null
null
null
{ "memory": { "unit": "MB", "max_ram": 2498.625536, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.6646431359999951, 0.5508785669999838, 0.5433905630000027, 0.548464628999966, 0.543980815999987 ], "count": 5, "total": 2.8513577109999346, "mean": 0.5702715421999869, "p50": 0.548464628999966, "p90": 0.6191373083999906, "p95": 0.6418902221999928, "p99": 0.6600925532399947, "stdev": 0.04726820494507546, "stdev_": 8.28871887289426 }, "throughput": { "unit": "samples/s", "value": 17.535505912537943 }, "energy": { "unit": "kWh", "cpu": 0.00011802738651666497, "ram": 0.000004933497438480545, "gpu": 0, "total": 0.00012296088395514552 }, "efficiency": { "unit": "samples/kWh", "value": 81326.67624321785 } }
{ "memory": { "unit": "MB", "max_ram": 2498.625536, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.6646431359999951, 0.5508785669999838 ], "count": 2, "total": 1.215521702999979, "mean": 0.6077608514999895, "p50": 0.6077608514999895, "p90": 0.653266679099994, "p95": 0.6589549075499945, "p99": 0.6635054903099951, "stdev": 0.05688228450000565, "stdev_": 9.359320258884203 }, "throughput": { "unit": "samples/s", "value": 6.581536125809626 }, "energy": null, "efficiency": null }
{ "memory": { "unit": "MB", "max_ram": 2498.625536, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.5433905630000027, 0.548464628999966, 0.543980815999987 ], "count": 3, "total": 1.6358360079999557, "mean": 0.5452786693333186, "p50": 0.543980815999987, "p90": 0.5475678663999701, "p95": 0.548016247699968, "p99": 0.5483749527399664, "stdev": 0.002265664567571828, "stdev_": 0.41550581289782124 }, "throughput": { "unit": "samples/s", "value": 11.003547979120219 }, "energy": null, "efficiency": null }
{ "name": "cpu_training_transformers_fill-mask_google-bert/bert-base-uncased", "backend": { "name": "pytorch", "version": "2.3.0+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "fill-mask", "model": "google-bert/bert-base-uncased", "library": "transformers", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "hub_kwargs": { "revision": "main", "force_download": false, "local_files_only": false, "trust_remote_code": false }, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": false }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": "error", "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.346304, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.14", "optimum_benchmark_version": "0.2.0", "optimum_benchmark_commit": "2e77e02d1fd3ab0d2e788c3d89c12299219a25e8", "transformers_version": "4.40.2", "transformers_commit": null, "accelerate_version": "0.30.0", "accelerate_commit": null, "diffusers_version": "0.27.2", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "0.9.16", "timm_commit": null, "peft_version": null, "peft_commit": null } }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2488.782848, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 5, "total": 2.738775005999969, "mean": 0.5477550011999938, "stdev": 0.03693447784258994, "p50": 0.5307143729999666, "p90": 0.5856752317999963, "p95": 0.6036043034000045, "p99": 0.6179475606800111, "values": [ 0.6215333750000127, 0.5307143729999666, 0.5270229210000252, 0.5276163199999928, 0.5318880169999716 ] }, "throughput": { "unit": "samples/s", "value": 18.25633719106628 }, "energy": null, "efficiency": null }, "warmup": { "memory": { "unit": "MB", "max_ram": 2488.782848, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 2, "total": 1.1522477479999793, "mean": 0.5761238739999897, "stdev": 0.045409501000023056, "p50": 0.5761238739999897, "p90": 0.612451474800008, "p95": 0.6169924249000104, "p99": 0.6206251849800123, "values": [ 0.6215333750000127, 0.5307143729999666 ] }, "throughput": { "unit": "samples/s", "value": 6.942951300088064 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2488.782848, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 3, "total": 1.5865272579999896, "mean": 0.5288424193333299, "stdev": 0.0021671455040491463, "p50": 0.5276163199999928, "p90": 0.5310336775999758, "p95": 0.5314608472999737, "p99": 0.531802583059972, "values": [ 0.5270229210000252, 0.5276163199999928, 0.5318880169999716 ] }, "throughput": { "unit": "samples/s", "value": 11.345534663357242 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
{ "name": "cpu_training_transformers_image-classification_google/vit-base-patch16-224", "backend": { "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "image-classification", "library": "transformers", "model_type": "vit", "model": "google/vit-base-patch16-224", "processor": "google/vit-base-patch16-224", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }, "print_report": true, "log_report": true }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2574.209024, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.6462145919999784, 1.5132404050000332, 1.523379704999968, 1.5218380649999972, 1.5011460709999938 ], "count": 5, "total": 7.705818837999971, "mean": 1.541163767599994, "p50": 1.5218380649999972, "p90": 1.5970806371999742, "p95": 1.6216476145999763, "p99": 1.641301196519978, "stdev": 0.05311602694238358, "stdev_": 3.4464881707606905 }, "throughput": { "unit": "samples/s", "value": 6.488603099963014 }, "energy": { "unit": "kWh", "cpu": 0.0003109086155000019, "ram": 0.000012996396672568943, "gpu": 0, "total": 0.0003239050121725709 }, "efficiency": { "unit": "samples/kWh", "value": 30873.248712410095 } }, "warmup": { "memory": { "unit": "MB", "max_ram": 2574.209024, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.6462145919999784, 1.5132404050000332 ], "count": 2, "total": 3.1594549970000116, "mean": 1.5797274985000058, "p50": 1.5797274985000058, "p90": 1.6329171732999839, "p95": 1.6395658826499813, "p99": 1.644884850129979, "stdev": 0.06648709349997262, "stdev_": 4.208769775996425 }, "throughput": { "unit": "samples/s", "value": 2.5320822760875585 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2574.209024, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.523379704999968, 1.5218380649999972, 1.5011460709999938 ], "count": 3, "total": 4.546363840999959, "mean": 1.515454613666653, "p50": 1.5218380649999972, "p90": 1.5230713769999737, "p95": 1.5232255409999709, "p99": 1.5233488721999686, "stdev": 0.010137223762124852, "stdev_": 0.6689229535946161 }, "throughput": { "unit": "samples/s", "value": 3.9592079801604605 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
null
null
cpu_training_transformers_image-classification_google/vit-base-patch16-224
{ "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "image-classification", "library": "transformers", "model_type": "vit", "model": "google/vit-base-patch16-224", "processor": "google/vit-base-patch16-224", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }
{ "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }
{ "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }
{ "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }
true
true
null
null
null
null
null
null
null
null
null
null
null
null
{ "memory": { "unit": "MB", "max_ram": 2574.209024, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.6462145919999784, 1.5132404050000332, 1.523379704999968, 1.5218380649999972, 1.5011460709999938 ], "count": 5, "total": 7.705818837999971, "mean": 1.541163767599994, "p50": 1.5218380649999972, "p90": 1.5970806371999742, "p95": 1.6216476145999763, "p99": 1.641301196519978, "stdev": 0.05311602694238358, "stdev_": 3.4464881707606905 }, "throughput": { "unit": "samples/s", "value": 6.488603099963014 }, "energy": { "unit": "kWh", "cpu": 0.0003109086155000019, "ram": 0.000012996396672568943, "gpu": 0, "total": 0.0003239050121725709 }, "efficiency": { "unit": "samples/kWh", "value": 30873.248712410095 } }
{ "memory": { "unit": "MB", "max_ram": 2574.209024, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.6462145919999784, 1.5132404050000332 ], "count": 2, "total": 3.1594549970000116, "mean": 1.5797274985000058, "p50": 1.5797274985000058, "p90": 1.6329171732999839, "p95": 1.6395658826499813, "p99": 1.644884850129979, "stdev": 0.06648709349997262, "stdev_": 4.208769775996425 }, "throughput": { "unit": "samples/s", "value": 2.5320822760875585 }, "energy": null, "efficiency": null }
{ "memory": { "unit": "MB", "max_ram": 2574.209024, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.523379704999968, 1.5218380649999972, 1.5011460709999938 ], "count": 3, "total": 4.546363840999959, "mean": 1.515454613666653, "p50": 1.5218380649999972, "p90": 1.5230713769999737, "p95": 1.5232255409999709, "p99": 1.5233488721999686, "stdev": 0.010137223762124852, "stdev_": 0.6689229535946161 }, "throughput": { "unit": "samples/s", "value": 3.9592079801604605 }, "energy": null, "efficiency": null }
{ "name": "cpu_training_transformers_image-classification_google/vit-base-patch16-224", "backend": { "name": "pytorch", "version": "2.3.0+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "image-classification", "model": "google/vit-base-patch16-224", "library": "transformers", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "hub_kwargs": { "revision": "main", "force_download": false, "local_files_only": false, "trust_remote_code": false }, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": false }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": "error", "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.346304, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.14", "optimum_benchmark_version": "0.2.0", "optimum_benchmark_commit": "2e77e02d1fd3ab0d2e788c3d89c12299219a25e8", "transformers_version": "4.40.2", "transformers_commit": null, "accelerate_version": "0.30.0", "accelerate_commit": null, "diffusers_version": "0.27.2", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "0.9.16", "timm_commit": null, "peft_version": null, "peft_commit": null } }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2442.985472, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 5, "total": 7.2970974209999895, "mean": 1.459419484199998, "stdev": 0.05210139006345095, "p50": 1.4401334369999859, "p90": 1.521250764199999, "p95": 1.5379663595999886, "p99": 1.5513388359199802, "values": [ 1.5546819549999782, 1.4241662819999874, 1.4401334369999859, 1.4711039780000306, 1.4070117690000075 ] }, "throughput": { "unit": "samples/s", "value": 6.852039532336137 }, "energy": null, "efficiency": null }, "warmup": { "memory": { "unit": "MB", "max_ram": 2442.985472, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 2, "total": 2.9788482369999656, "mean": 1.4894241184999828, "stdev": 0.06525783649999539, "p50": 1.4894241184999828, "p90": 1.541630387699979, "p95": 1.5481561713499787, "p99": 1.5533767982699782, "values": [ 1.5546819549999782, 1.4241662819999874 ] }, "throughput": { "unit": "samples/s", "value": 2.6856017371522416 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2442.985472, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 3, "total": 4.318249184000024, "mean": 1.4394163946666747, "stdev": 0.02617044676610726, "p50": 1.4401334369999859, "p90": 1.4649098698000216, "p95": 1.4680069239000262, "p99": 1.4704845671800297, "values": [ 1.4401334369999859, 1.4711039780000306, 1.4070117690000075 ] }, "throughput": { "unit": "samples/s", "value": 4.1683560241714614 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
{ "name": "cpu_training_transformers_multiple-choice_FacebookAI/roberta-base", "backend": { "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "multiple-choice", "library": "transformers", "model_type": "roberta", "model": "FacebookAI/roberta-base", "processor": "FacebookAI/roberta-base", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }, "print_report": true, "log_report": true }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2901.221376, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.8321957569999654, 0.7080643560000226, 0.7062936599999716, 0.7091921459999639, 0.7225947690000112 ], "count": 5, "total": 3.6783406879999347, "mean": 0.735668137599987, "p50": 0.7091921459999639, "p90": 0.7883553617999837, "p95": 0.8102755593999745, "p99": 0.8278117174799672, "stdev": 0.04860926063555123, "stdev_": 6.607498429133013 }, "throughput": { "unit": "samples/s", "value": 13.593085644056277 }, "energy": { "unit": "kWh", "cpu": 0.00015033946176666642, "ram": 0.0000062841079038453525, "gpu": 0, "total": 0.00015662356967051177 }, "efficiency": { "unit": "samples/kWh", "value": 63847.35082361455 } }, "warmup": { "memory": { "unit": "MB", "max_ram": 2901.221376, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.8321957569999654, 0.7080643560000226 ], "count": 2, "total": 1.540260112999988, "mean": 0.770130056499994, "p50": 0.770130056499994, "p90": 0.8197826168999711, "p95": 0.8259891869499683, "p99": 0.830954442989966, "stdev": 0.0620657004999714, "stdev_": 8.059119362519244 }, "throughput": { "unit": "samples/s", "value": 5.1939279167713295 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2901.221376, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7062936599999716, 0.7091921459999639, 0.7225947690000112 ], "count": 3, "total": 2.1380805749999467, "mean": 0.7126935249999823, "p50": 0.7091921459999639, "p90": 0.7199142444000017, "p95": 0.7212545067000065, "p99": 0.7223267165400102, "stdev": 0.0071005295499288685, "stdev_": 0.9962949431775805 }, "throughput": { "unit": "samples/s", "value": 8.418765976581797 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
null
null
cpu_training_transformers_multiple-choice_FacebookAI/roberta-base
{ "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "multiple-choice", "library": "transformers", "model_type": "roberta", "model": "FacebookAI/roberta-base", "processor": "FacebookAI/roberta-base", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }
{ "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }
{ "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }
{ "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }
true
true
null
null
null
null
null
null
null
null
null
null
null
null
{ "memory": { "unit": "MB", "max_ram": 2901.221376, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.8321957569999654, 0.7080643560000226, 0.7062936599999716, 0.7091921459999639, 0.7225947690000112 ], "count": 5, "total": 3.6783406879999347, "mean": 0.735668137599987, "p50": 0.7091921459999639, "p90": 0.7883553617999837, "p95": 0.8102755593999745, "p99": 0.8278117174799672, "stdev": 0.04860926063555123, "stdev_": 6.607498429133013 }, "throughput": { "unit": "samples/s", "value": 13.593085644056277 }, "energy": { "unit": "kWh", "cpu": 0.00015033946176666642, "ram": 0.0000062841079038453525, "gpu": 0, "total": 0.00015662356967051177 }, "efficiency": { "unit": "samples/kWh", "value": 63847.35082361455 } }
{ "memory": { "unit": "MB", "max_ram": 2901.221376, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.8321957569999654, 0.7080643560000226 ], "count": 2, "total": 1.540260112999988, "mean": 0.770130056499994, "p50": 0.770130056499994, "p90": 0.8197826168999711, "p95": 0.8259891869499683, "p99": 0.830954442989966, "stdev": 0.0620657004999714, "stdev_": 8.059119362519244 }, "throughput": { "unit": "samples/s", "value": 5.1939279167713295 }, "energy": null, "efficiency": null }
{ "memory": { "unit": "MB", "max_ram": 2901.221376, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7062936599999716, 0.7091921459999639, 0.7225947690000112 ], "count": 3, "total": 2.1380805749999467, "mean": 0.7126935249999823, "p50": 0.7091921459999639, "p90": 0.7199142444000017, "p95": 0.7212545067000065, "p99": 0.7223267165400102, "stdev": 0.0071005295499288685, "stdev_": 0.9962949431775805 }, "throughput": { "unit": "samples/s", "value": 8.418765976581797 }, "energy": null, "efficiency": null }
{ "name": "cpu_training_transformers_multiple-choice_FacebookAI/roberta-base", "backend": { "name": "pytorch", "version": "2.3.0+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "multiple-choice", "model": "FacebookAI/roberta-base", "library": "transformers", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "hub_kwargs": { "revision": "main", "force_download": false, "local_files_only": false, "trust_remote_code": false }, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": false }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": "error", "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.346304, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.14", "optimum_benchmark_version": "0.2.0", "optimum_benchmark_commit": "2e77e02d1fd3ab0d2e788c3d89c12299219a25e8", "transformers_version": "4.40.2", "transformers_commit": null, "accelerate_version": "0.30.0", "accelerate_commit": null, "diffusers_version": "0.27.2", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "0.9.16", "timm_commit": null, "peft_version": null, "peft_commit": null } }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2845.749248, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 5, "total": 3.581090587999995, "mean": 0.716218117599999, "stdev": 0.043372798377969854, "p50": 0.697155070000008, "p90": 0.7645524997999928, "p95": 0.7826806403999967, "p99": 0.7971831528799999, "values": [ 0.8008087810000006, 0.710168077999981, 0.6928677590000234, 0.697155070000008, 0.680090899999982 ] }, "throughput": { "unit": "samples/s", "value": 13.962227084549829 }, "energy": null, "efficiency": null }, "warmup": { "memory": { "unit": "MB", "max_ram": 2845.749248, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 2, "total": 1.5109768589999817, "mean": 0.7554884294999908, "stdev": 0.04532035150000979, "p50": 0.7554884294999908, "p90": 0.7917447106999986, "p95": 0.7962767458499996, "p99": 0.7999023739700004, "values": [ 0.8008087810000006, 0.710168077999981 ] }, "throughput": { "unit": "samples/s", "value": 5.294588035778844 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2845.749248, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 3, "total": 2.0701137290000133, "mean": 0.6900379096666711, "stdev": 0.007248103654729414, "p50": 0.6928677590000234, "p90": 0.696297607800011, "p95": 0.6967263389000096, "p99": 0.6970693237800083, "values": [ 0.6928677590000234, 0.697155070000008, 0.680090899999982 ] }, "throughput": { "unit": "samples/s", "value": 8.695174447587021 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
{ "name": "cpu_training_transformers_text-classification_FacebookAI/roberta-base", "backend": { "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-classification", "library": "transformers", "model_type": "roberta", "model": "FacebookAI/roberta-base", "processor": "FacebookAI/roberta-base", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }, "print_report": true, "log_report": true }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2822.664192, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7057864399999971, 0.5932530329999963, 0.598606932999985, 0.5777662120000286, 0.6042163749999645 ], "count": 5, "total": 3.0796289929999716, "mean": 0.6159257985999943, "p50": 0.598606932999985, "p90": 0.6651584139999841, "p95": 0.6854724269999906, "p99": 0.7017236373999958, "stdev": 0.04578689098195848, "stdev_": 7.433832303506778 }, "throughput": { "unit": "samples/s", "value": 16.23572193717182 }, "energy": { "unit": "kWh", "cpu": 0.0001275407137277774, "ram": 0.000005331074583961886, "gpu": 0, "total": 0.00013287178831173927 }, "efficiency": { "unit": "samples/kWh", "value": 75260.52089054706 } }, "warmup": { "memory": { "unit": "MB", "max_ram": 2822.664192, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7057864399999971, 0.5932530329999963 ], "count": 2, "total": 1.2990394729999934, "mean": 0.6495197364999967, "p50": 0.6495197364999967, "p90": 0.6945330992999971, "p95": 0.700159769649997, "p99": 0.7046611059299971, "stdev": 0.0562667035000004, "stdev_": 8.66281659171733 }, "throughput": { "unit": "samples/s", "value": 6.158396389237389 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2822.664192, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.598606932999985, 0.5777662120000286, 0.6042163749999645 ], "count": 3, "total": 1.7805895199999782, "mean": 0.5935298399999928, "p50": 0.598606932999985, "p90": 0.6030944865999686, "p95": 0.6036554307999665, "p99": 0.6041041861599649, "stdev": 0.011379380055718386, "stdev_": 1.9172380710830859 }, "throughput": { "unit": "samples/s", "value": 10.109011536808449 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
null
null
cpu_training_transformers_text-classification_FacebookAI/roberta-base
{ "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-classification", "library": "transformers", "model_type": "roberta", "model": "FacebookAI/roberta-base", "processor": "FacebookAI/roberta-base", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }
{ "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }
{ "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }
{ "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }
true
true
null
null
null
null
null
null
null
null
null
null
null
null
{ "memory": { "unit": "MB", "max_ram": 2822.664192, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7057864399999971, 0.5932530329999963, 0.598606932999985, 0.5777662120000286, 0.6042163749999645 ], "count": 5, "total": 3.0796289929999716, "mean": 0.6159257985999943, "p50": 0.598606932999985, "p90": 0.6651584139999841, "p95": 0.6854724269999906, "p99": 0.7017236373999958, "stdev": 0.04578689098195848, "stdev_": 7.433832303506778 }, "throughput": { "unit": "samples/s", "value": 16.23572193717182 }, "energy": { "unit": "kWh", "cpu": 0.0001275407137277774, "ram": 0.000005331074583961886, "gpu": 0, "total": 0.00013287178831173927 }, "efficiency": { "unit": "samples/kWh", "value": 75260.52089054706 } }
{ "memory": { "unit": "MB", "max_ram": 2822.664192, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7057864399999971, 0.5932530329999963 ], "count": 2, "total": 1.2990394729999934, "mean": 0.6495197364999967, "p50": 0.6495197364999967, "p90": 0.6945330992999971, "p95": 0.700159769649997, "p99": 0.7046611059299971, "stdev": 0.0562667035000004, "stdev_": 8.66281659171733 }, "throughput": { "unit": "samples/s", "value": 6.158396389237389 }, "energy": null, "efficiency": null }
{ "memory": { "unit": "MB", "max_ram": 2822.664192, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.598606932999985, 0.5777662120000286, 0.6042163749999645 ], "count": 3, "total": 1.7805895199999782, "mean": 0.5935298399999928, "p50": 0.598606932999985, "p90": 0.6030944865999686, "p95": 0.6036554307999665, "p99": 0.6041041861599649, "stdev": 0.011379380055718386, "stdev_": 1.9172380710830859 }, "throughput": { "unit": "samples/s", "value": 10.109011536808449 }, "energy": null, "efficiency": null }
{ "name": "cpu_training_transformers_text-classification_FacebookAI/roberta-base", "backend": { "name": "pytorch", "version": "2.3.0+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-classification", "model": "FacebookAI/roberta-base", "library": "transformers", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "hub_kwargs": { "revision": "main", "force_download": false, "local_files_only": false, "trust_remote_code": false }, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": false }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": "error", "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.346304, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.14", "optimum_benchmark_version": "0.2.0", "optimum_benchmark_commit": "2e77e02d1fd3ab0d2e788c3d89c12299219a25e8", "transformers_version": "4.40.2", "transformers_commit": null, "accelerate_version": "0.30.0", "accelerate_commit": null, "diffusers_version": "0.27.2", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "0.9.16", "timm_commit": null, "peft_version": null, "peft_commit": null } }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2826.752, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 5, "total": 2.882509665999976, "mean": 0.5765019331999952, "stdev": 0.04978939696949581, "p50": 0.5569985249999831, "p90": 0.6300386333999881, "p95": 0.6525100941999881, "p99": 0.670487262839988, "values": [ 0.674981554999988, 0.5569985249999831, 0.5424031590000027, 0.5626242509999884, 0.5455021760000136 ] }, "throughput": { "unit": "samples/s", "value": 17.34599560576128 }, "energy": null, "efficiency": null }, "warmup": { "memory": { "unit": "MB", "max_ram": 2826.752, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 2, "total": 1.2319800799999712, "mean": 0.6159900399999856, "stdev": 0.05899151500000244, "p50": 0.6159900399999856, "p90": 0.6631832519999875, "p95": 0.6690824034999878, "p99": 0.673801724699988, "values": [ 0.674981554999988, 0.5569985249999831 ] }, "throughput": { "unit": "samples/s", "value": 6.493611487614465 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2826.752, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 3, "total": 1.6505295860000047, "mean": 0.5501765286666682, "stdev": 0.00889233078021606, "p50": 0.5455021760000136, "p90": 0.5591998359999935, "p95": 0.5609120434999909, "p99": 0.5622818094999888, "values": [ 0.5424031590000027, 0.5626242509999884, 0.5455021760000136 ] }, "throughput": { "unit": "samples/s", "value": 10.905590637500968 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
{ "name": "cpu_training_transformers_text-generation_openai-community/gpt2", "backend": { "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-generation", "library": "transformers", "model_type": "gpt2", "model": "openai-community/gpt2", "processor": "openai-community/gpt2", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }, "print_report": true, "log_report": true }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2860.118016, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7623160859999984, 0.6390285900000094, 0.6372368130000154, 0.6580188159999807, 0.6341149209999912 ], "count": 5, "total": 3.3307152259999953, "mean": 0.666143045199999, "p50": 0.6390285900000094, "p90": 0.7205971779999913, "p95": 0.7414566319999949, "p99": 0.7581441951999978, "stdev": 0.048809469485696144, "stdev_": 7.327175422366207 }, "throughput": { "unit": "samples/s", "value": 15.011790743829888 }, "energy": { "unit": "kWh", "cpu": 0.00013672229584999927, "ram": 0.0000057148790530788785, "gpu": 0, "total": 0.00014243717490307814 }, "efficiency": { "unit": "samples/kWh", "value": 70206.39104085386 } }, "warmup": { "memory": { "unit": "MB", "max_ram": 2860.118016, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7623160859999984, 0.6390285900000094 ], "count": 2, "total": 1.401344676000008, "mean": 0.700672338000004, "p50": 0.700672338000004, "p90": 0.7499873363999996, "p95": 0.756151711199999, "p99": 0.7610832110399985, "stdev": 0.0616437479999945, "stdev_": 8.797799578609048 }, "throughput": { "unit": "samples/s", "value": 5.708802507342566 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2860.118016, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.6372368130000154, 0.6580188159999807, 0.6341149209999912 ], "count": 3, "total": 1.9293705499999874, "mean": 0.6431235166666625, "p50": 0.6372368130000154, "p90": 0.6538624153999877, "p95": 0.6559406156999842, "p99": 0.6576031759399814, "stdev": 0.010609398634758295, "stdev_": 1.6496673438015883 }, "throughput": { "unit": "samples/s", "value": 9.329467582056811 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
null
null
cpu_training_transformers_text-generation_openai-community/gpt2
{ "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-generation", "library": "transformers", "model_type": "gpt2", "model": "openai-community/gpt2", "processor": "openai-community/gpt2", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }
{ "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }
{ "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }
{ "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }
true
true
null
null
null
null
null
null
null
null
null
null
null
null
{ "memory": { "unit": "MB", "max_ram": 2860.118016, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7623160859999984, 0.6390285900000094, 0.6372368130000154, 0.6580188159999807, 0.6341149209999912 ], "count": 5, "total": 3.3307152259999953, "mean": 0.666143045199999, "p50": 0.6390285900000094, "p90": 0.7205971779999913, "p95": 0.7414566319999949, "p99": 0.7581441951999978, "stdev": 0.048809469485696144, "stdev_": 7.327175422366207 }, "throughput": { "unit": "samples/s", "value": 15.011790743829888 }, "energy": { "unit": "kWh", "cpu": 0.00013672229584999927, "ram": 0.0000057148790530788785, "gpu": 0, "total": 0.00014243717490307814 }, "efficiency": { "unit": "samples/kWh", "value": 70206.39104085386 } }
{ "memory": { "unit": "MB", "max_ram": 2860.118016, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.7623160859999984, 0.6390285900000094 ], "count": 2, "total": 1.401344676000008, "mean": 0.700672338000004, "p50": 0.700672338000004, "p90": 0.7499873363999996, "p95": 0.756151711199999, "p99": 0.7610832110399985, "stdev": 0.0616437479999945, "stdev_": 8.797799578609048 }, "throughput": { "unit": "samples/s", "value": 5.708802507342566 }, "energy": null, "efficiency": null }
{ "memory": { "unit": "MB", "max_ram": 2860.118016, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 0.6372368130000154, 0.6580188159999807, 0.6341149209999912 ], "count": 3, "total": 1.9293705499999874, "mean": 0.6431235166666625, "p50": 0.6372368130000154, "p90": 0.6538624153999877, "p95": 0.6559406156999842, "p99": 0.6576031759399814, "stdev": 0.010609398634758295, "stdev_": 1.6496673438015883 }, "throughput": { "unit": "samples/s", "value": 9.329467582056811 }, "energy": null, "efficiency": null }
{ "name": "cpu_training_transformers_text-generation_openai-community/gpt2", "backend": { "name": "pytorch", "version": "2.3.0+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "text-generation", "model": "openai-community/gpt2", "library": "transformers", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "hub_kwargs": { "revision": "main", "force_download": false, "local_files_only": false, "trust_remote_code": false }, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": false }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": "error", "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.346304, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.14", "optimum_benchmark_version": "0.2.0", "optimum_benchmark_commit": "2e77e02d1fd3ab0d2e788c3d89c12299219a25e8", "transformers_version": "4.40.2", "transformers_commit": null, "accelerate_version": "0.30.0", "accelerate_commit": null, "diffusers_version": "0.27.2", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "0.9.16", "timm_commit": null, "peft_version": null, "peft_commit": null } }
{ "overall": { "memory": { "unit": "MB", "max_ram": 2827.354112, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 5, "total": 3.1791685380000274, "mean": 0.6358337076000055, "stdev": 0.07846941233493662, "p50": 0.596941285000014, "p90": 0.7161873142000047, "p95": 0.7544328206000045, "p99": 0.7850292257200044, "values": [ 0.7926783270000044, 0.6014507950000052, 0.596941285000014, 0.593667699000008, 0.5944304319999958 ] }, "throughput": { "unit": "samples/s", "value": 15.727382616668173 }, "energy": null, "efficiency": null }, "warmup": { "memory": { "unit": "MB", "max_ram": 2827.354112, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 2, "total": 1.3941291220000096, "mean": 0.6970645610000048, "stdev": 0.0956137659999996, "p50": 0.6970645610000048, "p90": 0.7735555738000045, "p95": 0.7831169504000044, "p99": 0.7907660516800044, "values": [ 0.7926783270000044, 0.6014507950000052 ] }, "throughput": { "unit": "samples/s", "value": 5.738349392288174 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 2827.354112, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 3, "total": 1.7850394160000178, "mean": 0.5950131386666726, "stdev": 0.0013985114990352936, "p50": 0.5944304319999958, "p90": 0.5964391144000103, "p95": 0.5966901997000121, "p99": 0.5968910679400136, "values": [ 0.596941285000014, 0.593667699000008, 0.5944304319999958 ] }, "throughput": { "unit": "samples/s", "value": 10.083810944822195 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
{ "name": "cpu_training_transformers_token-classification_microsoft/deberta-v3-base", "backend": { "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "token-classification", "library": "transformers", "model_type": "deberta-v2", "model": "microsoft/deberta-v3-base", "processor": "microsoft/deberta-v3-base", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }, "print_report": true, "log_report": true }
{ "overall": { "memory": { "unit": "MB", "max_ram": 4415.823872, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.3704072399999632, 1.1449187460000303, 1.1957673840000211, 1.1325666239999919, 1.1506578839999975 ], "count": 5, "total": 5.994317878000004, "mean": 1.1988635756000008, "p50": 1.1506578839999975, "p90": 1.3005512975999864, "p95": 1.3354792687999748, "p99": 1.3634216457599655, "stdev": 0.0883922330669942, "stdev_": 7.373001804876433 }, "throughput": { "unit": "samples/s", "value": 8.341232650258187 }, "energy": { "unit": "kWh", "cpu": 0.00024116838382777815, "ram": 0.000010081131323350812, "gpu": 0, "total": 0.00025124951515112894 }, "efficiency": { "unit": "samples/kWh", "value": 39801.071830864654 } }, "warmup": { "memory": { "unit": "MB", "max_ram": 4415.823872, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.3704072399999632, 1.1449187460000303 ], "count": 2, "total": 2.5153259859999935, "mean": 1.2576629929999967, "p50": 1.2576629929999967, "p90": 1.34785839059997, "p95": 1.3591328152999664, "p99": 1.368152355059964, "stdev": 0.11274424699996644, "stdev_": 8.964583328561591 }, "throughput": { "unit": "samples/s", "value": 3.1805022667149516 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 4415.823872, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.1957673840000211, 1.1325666239999919, 1.1506578839999975 ], "count": 3, "total": 3.4789918920000105, "mean": 1.1596639640000035, "p50": 1.1506578839999975, "p90": 1.1867454840000164, "p95": 1.1912564340000187, "p99": 1.1948651940000206, "stdev": 0.026575880341269432, "stdev_": 2.291688037765857 }, "throughput": { "unit": "samples/s", "value": 5.173912604220564 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null
null
null
cpu_training_transformers_token-classification_microsoft/deberta-v3-base
{ "name": "pytorch", "version": "2.5.1+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "token-classification", "library": "transformers", "model_type": "deberta-v2", "model": "microsoft/deberta-v3-base", "processor": "microsoft/deberta-v3-base", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "model_kwargs": {}, "processor_kwargs": {}, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }
{ "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "evaluation_strategy": "no", "eval_strategy": "no", "save_strategy": "no", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": true }
{ "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": null, "numactl": false, "numactl_kwargs": {}, "start_method": "spawn" }
{ "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.342208, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1025-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.15", "optimum_benchmark_version": "0.5.0.dev0", "optimum_benchmark_commit": "78351930eda4599a64ff2da35e08ab39722c146a", "transformers_version": "4.46.3", "transformers_commit": null, "accelerate_version": "1.1.1", "accelerate_commit": null, "diffusers_version": "0.31.0", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "1.0.11", "timm_commit": null, "peft_version": null, "peft_commit": null }
true
true
null
null
null
null
null
null
null
null
null
null
null
null
{ "memory": { "unit": "MB", "max_ram": 4415.823872, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.3704072399999632, 1.1449187460000303, 1.1957673840000211, 1.1325666239999919, 1.1506578839999975 ], "count": 5, "total": 5.994317878000004, "mean": 1.1988635756000008, "p50": 1.1506578839999975, "p90": 1.3005512975999864, "p95": 1.3354792687999748, "p99": 1.3634216457599655, "stdev": 0.0883922330669942, "stdev_": 7.373001804876433 }, "throughput": { "unit": "samples/s", "value": 8.341232650258187 }, "energy": { "unit": "kWh", "cpu": 0.00024116838382777815, "ram": 0.000010081131323350812, "gpu": 0, "total": 0.00025124951515112894 }, "efficiency": { "unit": "samples/kWh", "value": 39801.071830864654 } }
{ "memory": { "unit": "MB", "max_ram": 4415.823872, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.3704072399999632, 1.1449187460000303 ], "count": 2, "total": 2.5153259859999935, "mean": 1.2576629929999967, "p50": 1.2576629929999967, "p90": 1.34785839059997, "p95": 1.3591328152999664, "p99": 1.368152355059964, "stdev": 0.11274424699996644, "stdev_": 8.964583328561591 }, "throughput": { "unit": "samples/s", "value": 3.1805022667149516 }, "energy": null, "efficiency": null }
{ "memory": { "unit": "MB", "max_ram": 4415.823872, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "values": [ 1.1957673840000211, 1.1325666239999919, 1.1506578839999975 ], "count": 3, "total": 3.4789918920000105, "mean": 1.1596639640000035, "p50": 1.1506578839999975, "p90": 1.1867454840000164, "p95": 1.1912564340000187, "p99": 1.1948651940000206, "stdev": 0.026575880341269432, "stdev_": 2.291688037765857 }, "throughput": { "unit": "samples/s", "value": 5.173912604220564 }, "energy": null, "efficiency": null }
{ "name": "cpu_training_transformers_token-classification_microsoft/deberta-v3-base", "backend": { "name": "pytorch", "version": "2.3.0+cpu", "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend", "task": "token-classification", "model": "microsoft/deberta-v3-base", "library": "transformers", "device": "cpu", "device_ids": null, "seed": 42, "inter_op_num_threads": null, "intra_op_num_threads": null, "hub_kwargs": { "revision": "main", "force_download": false, "local_files_only": false, "trust_remote_code": false }, "no_weights": true, "device_map": null, "torch_dtype": null, "eval_mode": true, "to_bettertransformer": false, "low_cpu_mem_usage": null, "attn_implementation": null, "cache_implementation": null, "autocast_enabled": false, "autocast_dtype": null, "torch_compile": false, "torch_compile_target": "forward", "torch_compile_config": {}, "quantization_scheme": null, "quantization_config": {}, "deepspeed_inference": false, "deepspeed_inference_config": {}, "peft_type": null, "peft_config": {} }, "scenario": { "name": "training", "_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario", "max_steps": 5, "warmup_steps": 2, "dataset_shapes": { "dataset_size": 500, "sequence_length": 16, "num_choices": 1 }, "training_arguments": { "per_device_train_batch_size": 2, "gradient_accumulation_steps": 1, "output_dir": "./trainer_output", "do_train": true, "use_cpu": false, "max_steps": 5, "do_eval": false, "do_predict": false, "report_to": "none", "skip_memory_metrics": true, "ddp_find_unused_parameters": false }, "latency": true, "memory": true, "energy": false }, "launcher": { "name": "process", "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher", "device_isolation": false, "device_isolation_action": "error", "start_method": "spawn" }, "environment": { "cpu": " AMD EPYC 7763 64-Core Processor", "cpu_count": 4, "cpu_ram_mb": 16757.346304, "system": "Linux", "machine": "x86_64", "platform": "Linux-6.5.0-1018-azure-x86_64-with-glibc2.35", "processor": "x86_64", "python_version": "3.10.14", "optimum_benchmark_version": "0.2.0", "optimum_benchmark_commit": "2e77e02d1fd3ab0d2e788c3d89c12299219a25e8", "transformers_version": "4.40.2", "transformers_commit": null, "accelerate_version": "0.30.0", "accelerate_commit": null, "diffusers_version": "0.27.2", "diffusers_commit": null, "optimum_version": null, "optimum_commit": null, "timm_version": "0.9.16", "timm_commit": null, "peft_version": null, "peft_commit": null } }
{ "overall": { "memory": { "unit": "MB", "max_ram": 4374.970368, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 5, "total": 5.583659903000068, "mean": 1.1167319806000138, "stdev": 0.07853258776030796, "p50": 1.0731834320000075, "p90": 1.2025418994000006, "p95": 1.2374836681999908, "p99": 1.265437083239983, "values": [ 1.2724254369999812, 1.0731834320000075, 1.0977165930000297, 1.0711078369999996, 1.0692266040000504 ] }, "throughput": { "unit": "samples/s", "value": 8.954700119385008 }, "energy": null, "efficiency": null }, "warmup": { "memory": { "unit": "MB", "max_ram": 4374.970368, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 2, "total": 2.3456088689999888, "mean": 1.1728044344999944, "stdev": 0.09962100249998684, "p50": 1.1728044344999944, "p90": 1.252501236499984, "p95": 1.2624633367499825, "p99": 1.2704330169499816, "values": [ 1.2724254369999812, 1.0731834320000075 ] }, "throughput": { "unit": "samples/s", "value": 3.4106283045436583 }, "energy": null, "efficiency": null }, "train": { "memory": { "unit": "MB", "max_ram": 4374.970368, "max_global_vram": null, "max_process_vram": null, "max_reserved": null, "max_allocated": null }, "latency": { "unit": "s", "count": 3, "total": 3.2380510340000797, "mean": 1.0793503446666932, "stdev": 0.01300958794585394, "p50": 1.0711078369999996, "p90": 1.0923948418000236, "p95": 1.0950557174000266, "p99": 1.097184417880029, "values": [ 1.0977165930000297, 1.0711078369999996, 1.0692266040000504 ] }, "throughput": { "unit": "samples/s", "value": 5.558899415419021 }, "energy": null, "efficiency": null } }
null
null
null
null
null
null
null
null
null
null