Dataset Preview
The full dataset viewer is not available; only a preview of the rows is shown.
The dataset generation failed
Error code: DatasetGenerationError
Exception: ArrowNotImplementedError
Message: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.
Traceback:
Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1870, in _prepare_split_single
    writer.write_table(table)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 620, in write_table
    self._build_writer(inferred_schema=pa_table.schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
    self.pa_writer = self._WRITER_CLASS(self.stream, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
    self.writer = _parquet.ParquetWriter(
  File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
  File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1886, in _prepare_split_single
    num_examples, num_bytes = writer.finalize()
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 639, in finalize
    self._build_writer(self.schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/arrow_writer.py", line 441, in _build_writer
    self.pa_writer = self._WRITER_CLASS(self.stream, schema)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/pyarrow/parquet/core.py", line 1010, in __init__
    self.writer = _parquet.ParquetWriter(
  File "pyarrow/_parquet.pyx", line 2157, in pyarrow._parquet.ParquetWriter.__cinit__
  File "pyarrow/error.pxi", line 154, in pyarrow.lib.pyarrow_internal_check_status
  File "pyarrow/error.pxi", line 91, in pyarrow.lib.check_status
pyarrow.lib.ArrowNotImplementedError: Cannot write struct type 'model_kwargs' with no child field to Parquet. Consider adding a dummy child field.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1417, in compute_config_parquet_and_info_response
    parquet_operations = convert_to_parquet(builder)
  File "/src/services/worker/src/worker/job_runners/config/parquet_and_info.py", line 1049, in convert_to_parquet
    builder.download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 924, in download_and_prepare
    self._download_and_prepare(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1000, in _download_and_prepare
    self._prepare_split(split_generator, **prepare_split_kwargs)
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1741, in _prepare_split
    for job_id, done, content in self._prepare_split_single(
  File "/src/services/worker/.venv/lib/python3.9/site-packages/datasets/builder.py", line 1897, in _prepare_split_single
    raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
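The root cause is that pyarrow cannot serialize a struct column that has no child fields, which is what always-empty dict columns such as "model_kwargs": {} in these rows become when the JSON is converted to Parquet. Below is a minimal sketch, assuming only that pyarrow is installed (the column and file names are illustrative, not part of this repository), that reproduces the error and applies the workaround the message itself suggests, i.e. adding a dummy child field:

import pyarrow as pa
import pyarrow.parquet as pq

# A struct column with zero child fields, as inferred from an always-empty
# dict such as "model_kwargs": {} in the rows above.
empty_struct = pa.array([{}, {}], type=pa.struct([]))

try:
    # This is the step that fails inside the dataset-viewer worker.
    pq.write_table(pa.table({"model_kwargs": empty_struct}), "report.parquet")
except pa.ArrowNotImplementedError as err:
    # "Cannot write struct type 'model_kwargs' with no child field to Parquet. ..."
    print(err)

# Workaround suggested by the error message: give the struct a dummy child
# field (here a nullable string named "_dummy") so Parquet has something to encode.
dummy_type = pa.struct([("_dummy", pa.string())])
dummy_struct = pa.array([{"_dummy": None}, {"_dummy": None}], type=dummy_type)
pq.write_table(pa.table({"model_kwargs": dummy_struct}), "report.parquet")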
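Even though Parquet conversion fails, each report in the preview is an ordinary optimum-benchmark JSON document and can be read directly. A small sketch, assuming a locally downloaded report file (the path is a placeholder), that extracts the headline latency and throughput figures from the overall/warmup/train sections visible in the rows below:

import json

# Placeholder path: any benchmark report JSON downloaded from this repository.
with open("benchmark_report.json") as f:
    report = json.load(f)

# The preview rows show three sections per report ("overall", "warmup", "train"),
# each with "memory", "latency", "throughput" and optionally "energy"/"efficiency".
for section in ("overall", "warmup", "train"):
    latency = report[section]["latency"]
    throughput = report[section]["throughput"]
    print(
        f"{section}: mean latency {latency['mean']:.4f} {latency['unit']}, "
        f"throughput {throughput['value']:.1f} {throughput['unit']}"
    )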
The preview table has 12 columns:
- config (dict)
- report (dict)
- name (string)
- backend (dict)
- scenario (dict)
- launcher (dict)
- environment (dict)
- print_report (bool)
- log_report (bool)
- overall (dict)
- warmup (dict)
- train (dict)

In each row below, cell values appear in this column order, separated by " | "; columns that are empty for a given row are shown as null.
{
"name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
"backend": {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "fill-mask",
"library": "transformers",
"model_type": "bert",
"model": "google-bert/bert-base-uncased",
"processor": "google-bert/bert-base-uncased",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
},
"print_report": true,
"log_report": true
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1307.992064,
"max_global_vram": 3176.660992,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"values": [
0.33647512817382813,
0.044434432983398435,
0.04337868881225586,
0.04369715118408203,
0.043215873718261716
],
"count": 5,
"total": 0.5112012748718262,
"mean": 0.10224025497436524,
"p50": 0.04369715118408203,
"p90": 0.2196588500976563,
"p95": 0.2780669891357421,
"p99": 0.3247935003662109,
"stdev": 0.11711818427037018,
"stdev_": 114.55192898309505
},
"throughput": {
"unit": "samples/s",
"value": 97.80883275875344
},
"energy": {
"unit": "kWh",
"cpu": 0.000008357478575694207,
"ram": 0.000004559703033691927,
"gpu": 0.000014441955997999967,
"total": 0.000027359137607386098
},
"efficiency": {
"unit": "samples/kWh",
"value": 365508.5969266925
}
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1307.992064,
"max_global_vram": 3176.660992,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"values": [
0.33647512817382813,
0.044434432983398435
],
"count": 2,
"total": 0.3809095611572266,
"mean": 0.1904547805786133,
"p50": 0.1904547805786133,
"p90": 0.30727105865478516,
"p95": 0.32187309341430664,
"p99": 0.3335547212219238,
"stdev": 0.14602034759521484,
"stdev_": 76.66930026728448
},
"throughput": {
"unit": "samples/s",
"value": 21.002360706555933
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1307.992064,
"max_global_vram": 3176.660992,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"values": [
0.04337868881225586,
0.04369715118408203,
0.043215873718261716
],
"count": 3,
"total": 0.1302917137145996,
"mean": 0.04343057123819987,
"p50": 0.04337868881225586,
"p90": 0.0436334587097168,
"p95": 0.043665304946899415,
"p99": 0.04369078193664551,
"stdev": 0.0001998763607299654,
"stdev_": 0.4602204277574912
},
"throughput": {
"unit": "samples/s",
"value": 138.15153310078108
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
null | null | cuda_training_transformers_fill-mask_google-bert/bert-base-uncased | {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "fill-mask",
"library": "transformers",
"model_type": "bert",
"model": "google-bert/bert-base-uncased",
"processor": "google-bert/bert-base-uncased",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
} | true | true | null | null | null |
null | null | null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1307.992064,
"max_global_vram": 3176.660992,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"values": [
0.33647512817382813,
0.044434432983398435,
0.04337868881225586,
0.04369715118408203,
0.043215873718261716
],
"count": 5,
"total": 0.5112012748718262,
"mean": 0.10224025497436524,
"p50": 0.04369715118408203,
"p90": 0.2196588500976563,
"p95": 0.2780669891357421,
"p99": 0.3247935003662109,
"stdev": 0.11711818427037018,
"stdev_": 114.55192898309505
},
"throughput": {
"unit": "samples/s",
"value": 97.80883275875344
},
"energy": {
"unit": "kWh",
"cpu": 0.000008357478575694207,
"ram": 0.000004559703033691927,
"gpu": 0.000014441955997999967,
"total": 0.000027359137607386098
},
"efficiency": {
"unit": "samples/kWh",
"value": 365508.5969266925
}
} | {
"memory": {
"unit": "MB",
"max_ram": 1307.992064,
"max_global_vram": 3176.660992,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"values": [
0.33647512817382813,
0.044434432983398435
],
"count": 2,
"total": 0.3809095611572266,
"mean": 0.1904547805786133,
"p50": 0.1904547805786133,
"p90": 0.30727105865478516,
"p95": 0.32187309341430664,
"p99": 0.3335547212219238,
"stdev": 0.14602034759521484,
"stdev_": 76.66930026728448
},
"throughput": {
"unit": "samples/s",
"value": 21.002360706555933
},
"energy": null,
"efficiency": null
} | {
"memory": {
"unit": "MB",
"max_ram": 1307.992064,
"max_global_vram": 3176.660992,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"values": [
0.04337868881225586,
0.04369715118408203,
0.043215873718261716
],
"count": 3,
"total": 0.1302917137145996,
"mean": 0.04343057123819987,
"p50": 0.04337868881225586,
"p90": 0.0436334587097168,
"p95": 0.043665304946899415,
"p99": 0.04369078193664551,
"stdev": 0.0001998763607299654,
"stdev_": 0.4602204277574912
},
"throughput": {
"unit": "samples/s",
"value": 138.15153310078108
},
"energy": null,
"efficiency": null
} |
{
"name": "cuda_training_transformers_fill-mask_google-bert/bert-base-uncased",
"backend": {
"name": "pytorch",
"version": "2.2.2",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "fill-mask",
"model": "google-bert/bert-base-uncased",
"library": "transformers",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"hub_kwargs": {
"revision": "main",
"force_download": false,
"local_files_only": false,
"trust_remote_code": false
},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": false
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.29792,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.214-202.855.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.2.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.40.2",
"transformers_commit": null,
"accelerate_version": "0.30.0",
"accelerate_commit": null,
"diffusers_version": "0.27.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "0.9.16",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1063.8336,
"max_global_vram": 3169.32096,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"count": 5,
"total": 0.7448790740966797,
"mean": 0.14897581481933594,
"stdev": 0.2054173633207176,
"p50": 0.04632883071899414,
"p90": 0.35471870422363283,
"p95": 0.4572641067504882,
"p99": 0.5393004287719726,
"values": [
0.5598095092773437,
0.04708249664306641,
0.04632883071899414,
0.04576665496826172,
0.04589158248901367
]
},
"throughput": {
"unit": "samples/s",
"value": 67.1249894630687
},
"energy": null,
"efficiency": null
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1063.8336,
"max_global_vram": 3169.32096,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"count": 2,
"total": 0.6068920059204101,
"mean": 0.3034460029602051,
"stdev": 0.25636350631713867,
"p50": 0.3034460029602051,
"p90": 0.508536808013916,
"p95": 0.5341731586456299,
"p99": 0.554682239151001,
"values": [
0.5598095092773437,
0.04708249664306641
]
},
"throughput": {
"unit": "samples/s",
"value": 13.181916917602548
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1063.8336,
"max_global_vram": 3169.32096,
"max_process_vram": 0,
"max_reserved": 2520.776704,
"max_allocated": 2211.86048
},
"latency": {
"unit": "s",
"count": 3,
"total": 0.13798706817626955,
"mean": 0.045995689392089846,
"stdev": 0.00024102431292157434,
"p50": 0.04589158248901367,
"p90": 0.04624138107299805,
"p95": 0.0462851058959961,
"p99": 0.04632008575439454,
"values": [
0.04632883071899414,
0.04576665496826172,
0.04589158248901367
]
},
"throughput": {
"unit": "samples/s",
"value": 130.4470066499722
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
{
"name": "cuda_training_transformers_image-classification_google/vit-base-patch16-224",
"backend": {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "image-classification",
"library": "transformers",
"model_type": "vit",
"model": "google/vit-base-patch16-224",
"processor": "google/vit-base-patch16-224",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
},
"print_report": true,
"log_report": true
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1729.712128,
"max_global_vram": 2618.81856,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1755.291648
},
"latency": {
"unit": "s",
"values": [
0.336110595703125,
0.04012134552001953,
0.040414207458496096,
0.039959552764892575,
0.039913471221923826
],
"count": 5,
"total": 0.4965191726684571,
"mean": 0.09930383453369142,
"p50": 0.04012134552001953,
"p90": 0.21783204040527349,
"p95": 0.27697131805419917,
"p99": 0.32428274017333986,
"stdev": 0.11840351037990764,
"stdev_": 119.23357334175869
},
"throughput": {
"unit": "samples/s",
"value": 100.70104590580779
},
"energy": {
"unit": "kWh",
"cpu": 0.00000984043112291586,
"ram": 0.000005351073682594105,
"gpu": 0.00001915251532200006,
"total": 0.000034344020127510026
},
"efficiency": {
"unit": "samples/kWh",
"value": 291171.5041766431
}
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1729.712128,
"max_global_vram": 2618.81856,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1755.291648
},
"latency": {
"unit": "s",
"values": [
0.336110595703125,
0.04012134552001953
],
"count": 2,
"total": 0.3762319412231445,
"mean": 0.18811597061157226,
"p50": 0.18811597061157226,
"p90": 0.30651167068481444,
"p95": 0.3213111331939697,
"p99": 0.333150703201294,
"stdev": 0.14799462509155276,
"stdev_": 78.67201525230236
},
"throughput": {
"unit": "samples/s",
"value": 21.26347904963011
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1729.712128,
"max_global_vram": 2618.81856,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1755.291648
},
"latency": {
"unit": "s",
"values": [
0.040414207458496096,
0.039959552764892575,
0.039913471221923826
],
"count": 3,
"total": 0.1202872314453125,
"mean": 0.04009574381510417,
"p50": 0.039959552764892575,
"p90": 0.040323276519775395,
"p95": 0.040368741989135745,
"p99": 0.040405114364624024,
"stdev": 0.0002259722641866211,
"stdev_": 0.5635816739768194
},
"throughput": {
"unit": "samples/s",
"value": 149.64181803605263
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
null | null | cuda_training_transformers_image-classification_google/vit-base-patch16-224 | {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "image-classification",
"library": "transformers",
"model_type": "vit",
"model": "google/vit-base-patch16-224",
"processor": "google/vit-base-patch16-224",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
} | true | true | null | null | null |
null | null | null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1729.712128,
"max_global_vram": 2618.81856,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1755.291648
},
"latency": {
"unit": "s",
"values": [
0.336110595703125,
0.04012134552001953,
0.040414207458496096,
0.039959552764892575,
0.039913471221923826
],
"count": 5,
"total": 0.4965191726684571,
"mean": 0.09930383453369142,
"p50": 0.04012134552001953,
"p90": 0.21783204040527349,
"p95": 0.27697131805419917,
"p99": 0.32428274017333986,
"stdev": 0.11840351037990764,
"stdev_": 119.23357334175869
},
"throughput": {
"unit": "samples/s",
"value": 100.70104590580779
},
"energy": {
"unit": "kWh",
"cpu": 0.00000984043112291586,
"ram": 0.000005351073682594105,
"gpu": 0.00001915251532200006,
"total": 0.000034344020127510026
},
"efficiency": {
"unit": "samples/kWh",
"value": 291171.5041766431
}
} | {
"memory": {
"unit": "MB",
"max_ram": 1729.712128,
"max_global_vram": 2618.81856,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1755.291648
},
"latency": {
"unit": "s",
"values": [
0.336110595703125,
0.04012134552001953
],
"count": 2,
"total": 0.3762319412231445,
"mean": 0.18811597061157226,
"p50": 0.18811597061157226,
"p90": 0.30651167068481444,
"p95": 0.3213111331939697,
"p99": 0.333150703201294,
"stdev": 0.14799462509155276,
"stdev_": 78.67201525230236
},
"throughput": {
"unit": "samples/s",
"value": 21.26347904963011
},
"energy": null,
"efficiency": null
} | {
"memory": {
"unit": "MB",
"max_ram": 1729.712128,
"max_global_vram": 2618.81856,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1755.291648
},
"latency": {
"unit": "s",
"values": [
0.040414207458496096,
0.039959552764892575,
0.039913471221923826
],
"count": 3,
"total": 0.1202872314453125,
"mean": 0.04009574381510417,
"p50": 0.039959552764892575,
"p90": 0.040323276519775395,
"p95": 0.040368741989135745,
"p99": 0.040405114364624024,
"stdev": 0.0002259722641866211,
"stdev_": 0.5635816739768194
},
"throughput": {
"unit": "samples/s",
"value": 149.64181803605263
},
"energy": null,
"efficiency": null
} |
{
"name": "cuda_training_transformers_image-classification_google/vit-base-patch16-224",
"backend": {
"name": "pytorch",
"version": "2.2.2",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "image-classification",
"model": "google/vit-base-patch16-224",
"library": "transformers",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"hub_kwargs": {
"revision": "main",
"force_download": false,
"local_files_only": false,
"trust_remote_code": false
},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": false
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.29792,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.214-202.855.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.2.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.40.2",
"transformers_commit": null,
"accelerate_version": "0.30.0",
"accelerate_commit": null,
"diffusers_version": "0.27.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "0.9.16",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1446.354944,
"max_global_vram": 2628.255744,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1756.126208
},
"latency": {
"unit": "s",
"count": 5,
"total": 0.48406525039672854,
"mean": 0.09681305007934571,
"stdev": 0.1110534407118009,
"p50": 0.04146995162963867,
"p90": 0.20794796142578126,
"p95": 0.26343380432128904,
"p99": 0.3078224786376953,
"values": [
0.3189196472167969,
0.04103168106079102,
0.041490432739257815,
0.04146995162963867,
0.04115353775024414
]
},
"throughput": {
"unit": "samples/s",
"value": 103.29185984538483
},
"energy": null,
"efficiency": null
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1446.354944,
"max_global_vram": 2628.255744,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1756.126208
},
"latency": {
"unit": "s",
"count": 2,
"total": 0.3599513282775879,
"mean": 0.17997566413879396,
"stdev": 0.13894398307800293,
"p50": 0.17997566413879396,
"p90": 0.2911308506011963,
"p95": 0.30502524890899657,
"p99": 0.31614076755523685,
"values": [
0.3189196472167969,
0.04103168106079102
]
},
"throughput": {
"unit": "samples/s",
"value": 22.225227055782792
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1446.354944,
"max_global_vram": 2628.255744,
"max_process_vram": 0,
"max_reserved": 1956.642816,
"max_allocated": 1756.126208
},
"latency": {
"unit": "s",
"count": 3,
"total": 0.12411392211914063,
"mean": 0.041371307373046874,
"stdev": 0.00015421321911462263,
"p50": 0.04146995162963867,
"p90": 0.04148633651733399,
"p95": 0.0414883846282959,
"p99": 0.041490023117065435,
"values": [
0.041490432739257815,
0.04146995162963867,
0.04115353775024414
]
},
"throughput": {
"unit": "samples/s",
"value": 145.02804917180256
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
{
"name": "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base",
"backend": {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "multiple-choice",
"library": "transformers",
"model_type": "roberta",
"model": "FacebookAI/roberta-base",
"processor": "FacebookAI/roberta-base",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
},
"print_report": true,
"log_report": true
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1344.6144,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"values": [
0.35371826171875,
0.046911487579345705,
0.046137344360351565,
0.046911487579345705,
0.04642201614379883
],
"count": 5,
"total": 0.5401005973815918,
"mean": 0.10802011947631836,
"p50": 0.046911487579345705,
"p90": 0.2309955520629883,
"p95": 0.2923569068908691,
"p99": 0.3414459907531738,
"stdev": 0.1228494290391605,
"stdev_": 113.72828472578502
},
"throughput": {
"unit": "samples/s",
"value": 92.57534659728215
},
"energy": {
"unit": "kWh",
"cpu": 0.0000087234176951385,
"ram": 0.000004761826612293075,
"gpu": 0.000013701955406000051,
"total": 0.000027187199713431626
},
"efficiency": {
"unit": "samples/kWh",
"value": 367820.15453616495
}
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1344.6144,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"values": [
0.35371826171875,
0.046911487579345705
],
"count": 2,
"total": 0.4006297492980957,
"mean": 0.20031487464904785,
"p50": 0.20031487464904785,
"p90": 0.32303758430480956,
"p95": 0.33837792301177977,
"p99": 0.3506501939773559,
"stdev": 0.15340338706970214,
"stdev_": 76.58112625857927
},
"throughput": {
"unit": "samples/s",
"value": 19.968562030193763
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1344.6144,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"values": [
0.046137344360351565,
0.046911487579345705,
0.04642201614379883
],
"count": 3,
"total": 0.13947084808349608,
"mean": 0.046490282694498695,
"p50": 0.04642201614379883,
"p90": 0.04681359329223633,
"p95": 0.046862540435791016,
"p99": 0.04690169815063477,
"stdev": 0.0003197078586637033,
"stdev_": 0.6876874910927033
},
"throughput": {
"unit": "samples/s",
"value": 129.05922812790283
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
null | null | cuda_training_transformers_multiple-choice_FacebookAI/roberta-base | {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "multiple-choice",
"library": "transformers",
"model_type": "roberta",
"model": "FacebookAI/roberta-base",
"processor": "FacebookAI/roberta-base",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
} | true | true | null | null | null |
null | null | null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1344.6144,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"values": [
0.35371826171875,
0.046911487579345705,
0.046137344360351565,
0.046911487579345705,
0.04642201614379883
],
"count": 5,
"total": 0.5401005973815918,
"mean": 0.10802011947631836,
"p50": 0.046911487579345705,
"p90": 0.2309955520629883,
"p95": 0.2923569068908691,
"p99": 0.3414459907531738,
"stdev": 0.1228494290391605,
"stdev_": 113.72828472578502
},
"throughput": {
"unit": "samples/s",
"value": 92.57534659728215
},
"energy": {
"unit": "kWh",
"cpu": 0.0000087234176951385,
"ram": 0.000004761826612293075,
"gpu": 0.000013701955406000051,
"total": 0.000027187199713431626
},
"efficiency": {
"unit": "samples/kWh",
"value": 367820.15453616495
}
} | {
"memory": {
"unit": "MB",
"max_ram": 1344.6144,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"values": [
0.35371826171875,
0.046911487579345705
],
"count": 2,
"total": 0.4006297492980957,
"mean": 0.20031487464904785,
"p50": 0.20031487464904785,
"p90": 0.32303758430480956,
"p95": 0.33837792301177977,
"p99": 0.3506501939773559,
"stdev": 0.15340338706970214,
"stdev_": 76.58112625857927
},
"throughput": {
"unit": "samples/s",
"value": 19.968562030193763
},
"energy": null,
"efficiency": null
} | {
"memory": {
"unit": "MB",
"max_ram": 1344.6144,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"values": [
0.046137344360351565,
0.046911487579345705,
0.04642201614379883
],
"count": 3,
"total": 0.13947084808349608,
"mean": 0.046490282694498695,
"p50": 0.04642201614379883,
"p90": 0.04681359329223633,
"p95": 0.046862540435791016,
"p99": 0.04690169815063477,
"stdev": 0.0003197078586637033,
"stdev_": 0.6876874910927033
},
"throughput": {
"unit": "samples/s",
"value": 129.05922812790283
},
"energy": null,
"efficiency": null
} |
{
"name": "cuda_training_transformers_multiple-choice_FacebookAI/roberta-base",
"backend": {
"name": "pytorch",
"version": "2.2.2",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "multiple-choice",
"model": "FacebookAI/roberta-base",
"library": "transformers",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"hub_kwargs": {
"revision": "main",
"force_download": false,
"local_files_only": false,
"trust_remote_code": false
},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": false
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.29792,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.214-202.855.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.2.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.40.2",
"transformers_commit": null,
"accelerate_version": "0.30.0",
"accelerate_commit": null,
"diffusers_version": "0.27.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "0.9.16",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1093.496832,
"max_global_vram": 3379.03616,
"max_process_vram": 0,
"max_reserved": 2730.491904,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"count": 5,
"total": 0.8026234703063965,
"mean": 0.16052469406127928,
"stdev": 0.22240891148008993,
"p50": 0.04907724761962891,
"p90": 0.38326721343994147,
"p95": 0.49430444412231433,
"p99": 0.5831342286682129,
"values": [
0.6053416748046875,
0.05015552139282226,
0.04897484970092773,
0.04907417678833008,
0.04907724761962891
]
},
"throughput": {
"unit": "samples/s",
"value": 62.295711313940046
},
"energy": null,
"efficiency": null
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1093.496832,
"max_global_vram": 3379.03616,
"max_process_vram": 0,
"max_reserved": 2730.491904,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"count": 2,
"total": 0.6554971961975098,
"mean": 0.3277485980987549,
"stdev": 0.27759307670593264,
"p50": 0.3277485980987549,
"p90": 0.549823059463501,
"p95": 0.5775823671340942,
"p99": 0.5997898132705688,
"values": [
0.6053416748046875,
0.05015552139282226
]
},
"throughput": {
"unit": "samples/s",
"value": 12.204476306546239
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1093.496832,
"max_global_vram": 3379.03616,
"max_process_vram": 0,
"max_reserved": 2730.491904,
"max_allocated": 2516.23424
},
"latency": {
"unit": "s",
"count": 3,
"total": 0.14712627410888673,
"mean": 0.049042091369628914,
"stdev": 0.000047563564546161045,
"p50": 0.04907417678833008,
"p90": 0.04907663345336914,
"p95": 0.049076940536499025,
"p99": 0.04907718620300293,
"values": [
0.04897484970092773,
0.04907417678833008,
0.04907724761962891
]
},
"throughput": {
"unit": "samples/s",
"value": 122.34388527149387
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
{
"name": "cuda_training_transformers_text-classification_FacebookAI/roberta-base",
"backend": {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-classification",
"library": "transformers",
"model_type": "roberta",
"model": "FacebookAI/roberta-base",
"processor": "FacebookAI/roberta-base",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
},
"print_report": true,
"log_report": true
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1331.77344,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"values": [
0.34475726318359373,
0.04758323287963867,
0.04634726333618164,
0.046231487274169925,
0.04656947326660156
],
"count": 5,
"total": 0.5314887199401855,
"mean": 0.10629774398803711,
"p50": 0.04656947326660156,
"p90": 0.22588765106201175,
"p95": 0.2853224571228027,
"p99": 0.3328703019714355,
"stdev": 0.11923071563909368,
"stdev_": 112.16674142445777
},
"throughput": {
"unit": "samples/s",
"value": 94.07537380215156
},
"energy": {
"unit": "kWh",
"cpu": 0.000008602623734721724,
"ram": 0.000004695145724212953,
"gpu": 0.000014463900460000116,
"total": 0.000027761669918934793
},
"efficiency": {
"unit": "samples/kWh",
"value": 360208.87897595525
}
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1331.77344,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"values": [
0.34475726318359373,
0.04758323287963867
],
"count": 2,
"total": 0.3923404960632324,
"mean": 0.1961702480316162,
"p50": 0.1961702480316162,
"p90": 0.31503986015319824,
"p95": 0.329898561668396,
"p99": 0.3417855228805542,
"stdev": 0.14858701515197753,
"stdev_": 75.7439095086581
},
"throughput": {
"unit": "samples/s",
"value": 20.390451865847318
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1331.77344,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"values": [
0.04634726333618164,
0.046231487274169925,
0.04656947326660156
],
"count": 3,
"total": 0.13914822387695314,
"mean": 0.04638274129231771,
"p50": 0.04634726333618164,
"p90": 0.046525031280517576,
"p95": 0.04654725227355957,
"p99": 0.04656502906799316,
"stdev": 0.0001402441841900384,
"stdev_": 0.30236286231160464
},
"throughput": {
"unit": "samples/s",
"value": 129.35846034166525
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
null | null | cuda_training_transformers_text-classification_FacebookAI/roberta-base | {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-classification",
"library": "transformers",
"model_type": "roberta",
"model": "FacebookAI/roberta-base",
"processor": "FacebookAI/roberta-base",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
} | true | true | null | null | null |
null | null | null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1331.77344,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"values": [
0.34475726318359373,
0.04758323287963867,
0.04634726333618164,
0.046231487274169925,
0.04656947326660156
],
"count": 5,
"total": 0.5314887199401855,
"mean": 0.10629774398803711,
"p50": 0.04656947326660156,
"p90": 0.22588765106201175,
"p95": 0.2853224571228027,
"p99": 0.3328703019714355,
"stdev": 0.11923071563909368,
"stdev_": 112.16674142445777
},
"throughput": {
"unit": "samples/s",
"value": 94.07537380215156
},
"energy": {
"unit": "kWh",
"cpu": 0.000008602623734721724,
"ram": 0.000004695145724212953,
"gpu": 0.000014463900460000116,
"total": 0.000027761669918934793
},
"efficiency": {
"unit": "samples/kWh",
"value": 360208.87897595525
}
} | {
"memory": {
"unit": "MB",
"max_ram": 1331.77344,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"values": [
0.34475726318359373,
0.04758323287963867
],
"count": 2,
"total": 0.3923404960632324,
"mean": 0.1961702480316162,
"p50": 0.1961702480316162,
"p90": 0.31503986015319824,
"p95": 0.329898561668396,
"p99": 0.3417855228805542,
"stdev": 0.14858701515197753,
"stdev_": 75.7439095086581
},
"throughput": {
"unit": "samples/s",
"value": 20.390451865847318
},
"energy": null,
"efficiency": null
} | {
"memory": {
"unit": "MB",
"max_ram": 1331.77344,
"max_global_vram": 3384.27904,
"max_process_vram": 0,
"max_reserved": 2728.394752,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"values": [
0.04634726333618164,
0.046231487274169925,
0.04656947326660156
],
"count": 3,
"total": 0.13914822387695314,
"mean": 0.04638274129231771,
"p50": 0.04634726333618164,
"p90": 0.046525031280517576,
"p95": 0.04654725227355957,
"p99": 0.04656502906799316,
"stdev": 0.0001402441841900384,
"stdev_": 0.30236286231160464
},
"throughput": {
"unit": "samples/s",
"value": 129.35846034166525
},
"energy": null,
"efficiency": null
} |
{
"name": "cuda_training_transformers_text-classification_FacebookAI/roberta-base",
"backend": {
"name": "pytorch",
"version": "2.2.2",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-classification",
"model": "FacebookAI/roberta-base",
"library": "transformers",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"hub_kwargs": {
"revision": "main",
"force_download": false,
"local_files_only": false,
"trust_remote_code": false
},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": false
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.29792,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.214-202.855.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.2.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.40.2",
"transformers_commit": null,
"accelerate_version": "0.30.0",
"accelerate_commit": null,
"diffusers_version": "0.27.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "0.9.16",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1080.1152,
"max_global_vram": 3379.03616,
"max_process_vram": 0,
"max_reserved": 2730.491904,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"count": 5,
"total": 0.778464241027832,
"mean": 0.1556928482055664,
"stdev": 0.21129125718980005,
"p50": 0.05020159912109375,
"p90": 0.36730080566406254,
"p95": 0.4727875488281249,
"p99": 0.557176943359375,
"values": [
0.5782742919921875,
0.050840576171875,
0.04969574356079102,
0.049452030181884765,
0.05020159912109375
]
},
"throughput": {
"unit": "samples/s",
"value": 64.22902602948513
},
"energy": null,
"efficiency": null
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1080.1152,
"max_global_vram": 3379.03616,
"max_process_vram": 0,
"max_reserved": 2730.491904,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"count": 2,
"total": 0.6291148681640625,
"mean": 0.31455743408203124,
"stdev": 0.26371685791015625,
"p50": 0.31455743408203124,
"p90": 0.5255309204101563,
"p95": 0.5519026062011718,
"p99": 0.5729999548339844,
"values": [
0.5782742919921875,
0.050840576171875
]
},
"throughput": {
"unit": "samples/s",
"value": 12.716278703357135
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1080.1152,
"max_global_vram": 3379.03616,
"max_process_vram": 0,
"max_reserved": 2730.491904,
"max_allocated": 2516.250112
},
"latency": {
"unit": "s",
"count": 3,
"total": 0.14934937286376954,
"mean": 0.049783124287923176,
"stdev": 0.0003121857804388603,
"p50": 0.04969574356079102,
"p90": 0.05010042800903321,
"p95": 0.050151013565063476,
"p99": 0.0501914820098877,
"values": [
0.04969574356079102,
0.049452030181884765,
0.05020159912109375
]
},
"throughput": {
"unit": "samples/s",
"value": 120.52276922795568
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
{
"name": "cuda_training_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
},
"print_report": true,
"log_report": true
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1358.19264,
"max_global_vram": 3566.731264,
"max_process_vram": 0,
"max_reserved": 2910.846976,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"values": [
0.36726577758789064,
0.04645580673217774,
0.04509900665283203,
0.044455936431884766,
0.0442367057800293
],
"count": 5,
"total": 0.5475132331848144,
"mean": 0.10950264663696288,
"p50": 0.04509900665283203,
"p90": 0.23894178924560552,
"p95": 0.303103783416748,
"p99": 0.3544333787536621,
"stdev": 0.1288838873461337,
"stdev_": 117.69933540823536
},
"throughput": {
"unit": "samples/s",
"value": 91.32199364233884
},
"energy": {
"unit": "kWh",
"cpu": 0.000008815630936805383,
"ram": 0.000004811756360088665,
"gpu": 0.00001449806715399975,
"total": 0.0000281254544508938
},
"efficiency": {
"unit": "samples/kWh",
"value": 355549.81049140735
}
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1358.19264,
"max_global_vram": 3566.731264,
"max_process_vram": 0,
"max_reserved": 2910.846976,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"values": [
0.36726577758789064,
0.04645580673217774
],
"count": 2,
"total": 0.4137215843200684,
"mean": 0.2068607921600342,
"p50": 0.2068607921600342,
"p90": 0.33518478050231937,
"p95": 0.351225279045105,
"p99": 0.3640576778793335,
"stdev": 0.16040498542785644,
"stdev_": 77.54247856875747
},
"throughput": {
"unit": "samples/s",
"value": 19.336675443577874
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1358.19264,
"max_global_vram": 3566.731264,
"max_process_vram": 0,
"max_reserved": 2910.846976,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"values": [
0.04509900665283203,
0.044455936431884766,
0.0442367057800293
],
"count": 3,
"total": 0.1337916488647461,
"mean": 0.0445972162882487,
"p50": 0.044455936431884766,
"p90": 0.04497039260864258,
"p95": 0.0450346996307373,
"p99": 0.04508614524841308,
"stdev": 0.00036593323359118137,
"stdev_": 0.8205293156102307
},
"throughput": {
"unit": "samples/s",
"value": 134.5375451512428
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
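The summary fields of these latency blocks (total, mean, p50/p90/p95/p99, stdev, and stdev_ as a percentage of the mean) appear reproducible from the raw "values" arrays alone. A minimal sketch, assuming NumPy's default linear-interpolation percentiles and a population standard deviation (ddof=0), applied to the "train" values of the gpt2 record above:

import numpy as np

# Per-step latencies (s) from the "train" block of the record above.
values = np.array([
    0.04509900665283203,
    0.044455936431884766,
    0.0442367057800293,
])

summary = {
    "count": values.size,
    "total": values.sum(),                               # ~0.133792
    "mean": values.mean(),                               # ~0.044597
    "p50": np.percentile(values, 50),                    # ~0.044456
    "p90": np.percentile(values, 90),                    # ~0.044970
    "p95": np.percentile(values, 95),                    # ~0.045035
    "p99": np.percentile(values, 99),                    # ~0.045086
    "stdev": values.std(ddof=0),                         # ~0.000366
    "stdev_": 100 * values.std(ddof=0) / values.mean(),  # ~0.82, i.e. stdev as % of mean
}
print(summary)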
null | null | cuda_training_transformers_text-generation_openai-community/gpt2 | {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"library": "transformers",
"model_type": "gpt2",
"model": "openai-community/gpt2",
"processor": "openai-community/gpt2",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
} | true | true | null | null | null |
null | null | null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1358.19264,
"max_global_vram": 3566.731264,
"max_process_vram": 0,
"max_reserved": 2910.846976,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"values": [
0.36726577758789064,
0.04645580673217774,
0.04509900665283203,
0.044455936431884766,
0.0442367057800293
],
"count": 5,
"total": 0.5475132331848144,
"mean": 0.10950264663696288,
"p50": 0.04509900665283203,
"p90": 0.23894178924560552,
"p95": 0.303103783416748,
"p99": 0.3544333787536621,
"stdev": 0.1288838873461337,
"stdev_": 117.69933540823536
},
"throughput": {
"unit": "samples/s",
"value": 91.32199364233884
},
"energy": {
"unit": "kWh",
"cpu": 0.000008815630936805383,
"ram": 0.000004811756360088665,
"gpu": 0.00001449806715399975,
"total": 0.0000281254544508938
},
"efficiency": {
"unit": "samples/kWh",
"value": 355549.81049140735
}
} | {
"memory": {
"unit": "MB",
"max_ram": 1358.19264,
"max_global_vram": 3566.731264,
"max_process_vram": 0,
"max_reserved": 2910.846976,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"values": [
0.36726577758789064,
0.04645580673217774
],
"count": 2,
"total": 0.4137215843200684,
"mean": 0.2068607921600342,
"p50": 0.2068607921600342,
"p90": 0.33518478050231937,
"p95": 0.351225279045105,
"p99": 0.3640576778793335,
"stdev": 0.16040498542785644,
"stdev_": 77.54247856875747
},
"throughput": {
"unit": "samples/s",
"value": 19.336675443577874
},
"energy": null,
"efficiency": null
} | {
"memory": {
"unit": "MB",
"max_ram": 1358.19264,
"max_global_vram": 3566.731264,
"max_process_vram": 0,
"max_reserved": 2910.846976,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"values": [
0.04509900665283203,
0.044455936431884766,
0.0442367057800293
],
"count": 3,
"total": 0.1337916488647461,
"mean": 0.0445972162882487,
"p50": 0.044455936431884766,
"p90": 0.04497039260864258,
"p95": 0.0450346996307373,
"p99": 0.04508614524841308,
"stdev": 0.00036593323359118137,
"stdev_": 0.8205293156102307
},
"throughput": {
"unit": "samples/s",
"value": 134.5375451512428
},
"energy": null,
"efficiency": null
} |
{
"name": "cuda_training_transformers_text-generation_openai-community/gpt2",
"backend": {
"name": "pytorch",
"version": "2.2.2",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "text-generation",
"model": "openai-community/gpt2",
"library": "transformers",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"hub_kwargs": {
"revision": "main",
"force_download": false,
"local_files_only": false,
"trust_remote_code": false
},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": false
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.29792,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.214-202.855.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.2.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.40.2",
"transformers_commit": null,
"accelerate_version": "0.30.0",
"accelerate_commit": null,
"diffusers_version": "0.27.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "0.9.16",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1107.173376,
"max_global_vram": 3563.585536,
"max_process_vram": 0,
"max_reserved": 2915.04128,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"count": 5,
"total": 0.8139294586181639,
"mean": 0.16278589172363278,
"stdev": 0.2273662362653359,
"p50": 0.04927385711669922,
"p90": 0.3902293930053711,
"p95": 0.5038737297058105,
"p99": 0.594789199066162,
"values": [
0.61751806640625,
0.049296382904052735,
0.04860006332397461,
0.0492410888671875,
0.04927385711669922
]
},
"throughput": {
"unit": "samples/s",
"value": 61.4303849929289
},
"energy": null,
"efficiency": null
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1107.173376,
"max_global_vram": 3563.585536,
"max_process_vram": 0,
"max_reserved": 2915.04128,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"count": 2,
"total": 0.6668144493103026,
"mean": 0.3334072246551513,
"stdev": 0.2841108417510986,
"p50": 0.3334072246551513,
"p90": 0.5606958980560303,
"p95": 0.5891069822311401,
"p99": 0.6118358495712279,
"values": [
0.61751806640625,
0.049296382904052735
]
},
"throughput": {
"unit": "samples/s",
"value": 11.99734050195603
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1107.173376,
"max_global_vram": 3563.585536,
"max_process_vram": 0,
"max_reserved": 2915.04128,
"max_allocated": 2523.776
},
"latency": {
"unit": "s",
"count": 3,
"total": 0.1471150093078613,
"mean": 0.04903833643595377,
"stdev": 0.00031019448743967263,
"p50": 0.0492410888671875,
"p90": 0.04926730346679687,
"p95": 0.049270580291748044,
"p99": 0.04927320175170898,
"values": [
0.04860006332397461,
0.0492410888671875,
0.04927385711669922
]
},
"throughput": {
"unit": "samples/s",
"value": 122.35325331307405
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
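The preview contains the same gpt2 training benchmark under two software stacks (pytorch 2.5.1+cu124 / transformers 4.46.3 vs pytorch 2.2.2 / transformers 4.40.2), both on a single NVIDIA A10G. A rough comparison of the two train-phase means from the rows above; since several library versions change at once and each report is a single 5-step run, the gap should not be attributed to any one component:

# Train mean latencies (s) taken from the two gpt2 records above.
mean_torch_2_5_1 = 0.0445972162882487    # pytorch 2.5.1+cu124 / transformers 4.46.3
mean_torch_2_2_2 = 0.04903833643595377   # pytorch 2.2.2 / transformers 4.40.2

ratio = mean_torch_2_2_2 / mean_torch_2_5_1
print(f"{ratio:.2f}x")  # ~1.10x higher per-step latency in the older stack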
{
"name": "cuda_training_transformers_token-classification_microsoft/deberta-v3-base",
"backend": {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "token-classification",
"library": "transformers",
"model_type": "deberta-v2",
"model": "microsoft/deberta-v3-base",
"processor": "microsoft/deberta-v3-base",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
},
"print_report": true,
"log_report": true
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1394.876416,
"max_global_vram": 4604.821504,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"values": [
0.41700039672851563,
0.08790105438232422,
0.09080831909179687,
0.08768511962890625,
0.08678800201416016
],
"count": 5,
"total": 0.7701828918457031,
"mean": 0.15403657836914061,
"p50": 0.08790105438232422,
"p90": 0.2865235656738282,
"p95": 0.3517619812011718,
"p99": 0.40395271362304686,
"stdev": 0.13148884148027526,
"stdev_": 85.36208923387608
},
"throughput": {
"unit": "samples/s",
"value": 64.919645099072
},
"energy": {
"unit": "kWh",
"cpu": 0.0000114169215541661,
"ram": 0.000006233076281749713,
"gpu": 0.0000226950181559998,
"total": 0.00004034501599191561
},
"efficiency": {
"unit": "samples/kWh",
"value": 247862.0903757682
}
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1394.876416,
"max_global_vram": 4604.821504,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"values": [
0.41700039672851563,
0.08790105438232422
],
"count": 2,
"total": 0.5049014511108398,
"mean": 0.2524507255554199,
"p50": 0.2524507255554199,
"p90": 0.3840904624938965,
"p95": 0.40054542961120604,
"p99": 0.41370940330505374,
"stdev": 0.1645496711730957,
"stdev_": 65.18090641691283
},
"throughput": {
"unit": "samples/s",
"value": 15.844676188589085
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1394.876416,
"max_global_vram": 4604.821504,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"values": [
0.09080831909179687,
0.08768511962890625,
0.08678800201416016
],
"count": 3,
"total": 0.26528144073486326,
"mean": 0.08842714691162108,
"p50": 0.08768511962890625,
"p90": 0.09018367919921876,
"p95": 0.09049599914550781,
"p99": 0.09074585510253906,
"stdev": 0.0017231155344132189,
"stdev_": 1.9486273102708995
},
"throughput": {
"unit": "samples/s",
"value": 67.85246623411618
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
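Across these records, the "overall" latency list appears to be the concatenation of the warmup and train lists, consistent with warmup_steps=2 and max_steps=5 in the scenario configs. A small sketch using the deberta-v3-base values above; the split rule is inferred from the data, not taken from documentation:

# The 5 overall values appear to split into 2 warmup steps and 3 measured train steps.
overall_values = [
    0.41700039672851563,
    0.08790105438232422,
    0.09080831909179687,
    0.08768511962890625,
    0.08678800201416016,
]
warmup_steps = 2

warmup_values = overall_values[:warmup_steps]   # matches the "warmup" block above
train_values = overall_values[warmup_steps:]    # matches the "train" block above
print(warmup_values, train_values)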
null | null | cuda_training_transformers_token-classification_microsoft/deberta-v3-base | {
"name": "pytorch",
"version": "2.5.1+cu124",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "token-classification",
"library": "transformers",
"model_type": "deberta-v2",
"model": "microsoft/deberta-v3-base",
"processor": "microsoft/deberta-v3-base",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"model_kwargs": {},
"processor_kwargs": {},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
} | {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"evaluation_strategy": "no",
"eval_strategy": "no",
"save_strategy": "no",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": true
} | {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"numactl": false,
"numactl_kwargs": {},
"start_method": "spawn"
} | {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.248768,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.227-219.884.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.12",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.5.0.dev0",
"optimum_benchmark_commit": null,
"transformers_version": "4.46.3",
"transformers_commit": null,
"accelerate_version": "1.1.1",
"accelerate_commit": null,
"diffusers_version": "0.31.0",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "1.0.11",
"timm_commit": null,
"peft_version": "0.13.2",
"peft_commit": null
} | true | true | null | null | null |
null | null | null | null | null | null | null | null | null | {
"memory": {
"unit": "MB",
"max_ram": 1394.876416,
"max_global_vram": 4604.821504,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"values": [
0.41700039672851563,
0.08790105438232422,
0.09080831909179687,
0.08768511962890625,
0.08678800201416016
],
"count": 5,
"total": 0.7701828918457031,
"mean": 0.15403657836914061,
"p50": 0.08790105438232422,
"p90": 0.2865235656738282,
"p95": 0.3517619812011718,
"p99": 0.40395271362304686,
"stdev": 0.13148884148027526,
"stdev_": 85.36208923387608
},
"throughput": {
"unit": "samples/s",
"value": 64.919645099072
},
"energy": {
"unit": "kWh",
"cpu": 0.0000114169215541661,
"ram": 0.000006233076281749713,
"gpu": 0.0000226950181559998,
"total": 0.00004034501599191561
},
"efficiency": {
"unit": "samples/kWh",
"value": 247862.0903757682
}
} | {
"memory": {
"unit": "MB",
"max_ram": 1394.876416,
"max_global_vram": 4604.821504,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"values": [
0.41700039672851563,
0.08790105438232422
],
"count": 2,
"total": 0.5049014511108398,
"mean": 0.2524507255554199,
"p50": 0.2524507255554199,
"p90": 0.3840904624938965,
"p95": 0.40054542961120604,
"p99": 0.41370940330505374,
"stdev": 0.1645496711730957,
"stdev_": 65.18090641691283
},
"throughput": {
"unit": "samples/s",
"value": 15.844676188589085
},
"energy": null,
"efficiency": null
} | {
"memory": {
"unit": "MB",
"max_ram": 1394.876416,
"max_global_vram": 4604.821504,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"values": [
0.09080831909179687,
0.08768511962890625,
0.08678800201416016
],
"count": 3,
"total": 0.26528144073486326,
"mean": 0.08842714691162108,
"p50": 0.08768511962890625,
"p90": 0.09018367919921876,
"p95": 0.09049599914550781,
"p99": 0.09074585510253906,
"stdev": 0.0017231155344132189,
"stdev_": 1.9486273102708995
},
"throughput": {
"unit": "samples/s",
"value": 67.85246623411618
},
"energy": null,
"efficiency": null
} |
{
"name": "cuda_training_transformers_token-classification_microsoft/deberta-v3-base",
"backend": {
"name": "pytorch",
"version": "2.2.2",
"_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
"task": "token-classification",
"model": "microsoft/deberta-v3-base",
"library": "transformers",
"device": "cuda",
"device_ids": "0",
"seed": 42,
"inter_op_num_threads": null,
"intra_op_num_threads": null,
"hub_kwargs": {
"revision": "main",
"force_download": false,
"local_files_only": false,
"trust_remote_code": false
},
"no_weights": true,
"device_map": null,
"torch_dtype": null,
"eval_mode": true,
"to_bettertransformer": false,
"low_cpu_mem_usage": null,
"attn_implementation": null,
"cache_implementation": null,
"autocast_enabled": false,
"autocast_dtype": null,
"torch_compile": false,
"torch_compile_target": "forward",
"torch_compile_config": {},
"quantization_scheme": null,
"quantization_config": {},
"deepspeed_inference": false,
"deepspeed_inference_config": {},
"peft_type": null,
"peft_config": {}
},
"scenario": {
"name": "training",
"_target_": "optimum_benchmark.scenarios.training.scenario.TrainingScenario",
"max_steps": 5,
"warmup_steps": 2,
"dataset_shapes": {
"dataset_size": 500,
"sequence_length": 16,
"num_choices": 1
},
"training_arguments": {
"per_device_train_batch_size": 2,
"gradient_accumulation_steps": 1,
"output_dir": "./trainer_output",
"do_train": true,
"use_cpu": false,
"max_steps": 5,
"do_eval": false,
"do_predict": false,
"report_to": "none",
"skip_memory_metrics": true,
"ddp_find_unused_parameters": false
},
"latency": true,
"memory": true,
"energy": false
},
"launcher": {
"name": "process",
"_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
"device_isolation": true,
"device_isolation_action": "error",
"start_method": "spawn"
},
"environment": {
"cpu": " AMD EPYC 7R32",
"cpu_count": 16,
"cpu_ram_mb": 66697.29792,
"system": "Linux",
"machine": "x86_64",
"platform": "Linux-5.10.214-202.855.amzn2.x86_64-x86_64-with-glibc2.35",
"processor": "x86_64",
"python_version": "3.10.14",
"gpu": [
"NVIDIA A10G"
],
"gpu_count": 1,
"gpu_vram_mb": 24146608128,
"optimum_benchmark_version": "0.2.0",
"optimum_benchmark_commit": null,
"transformers_version": "4.40.2",
"transformers_commit": null,
"accelerate_version": "0.30.0",
"accelerate_commit": null,
"diffusers_version": "0.27.2",
"diffusers_commit": null,
"optimum_version": null,
"optimum_commit": null,
"timm_version": "0.9.16",
"timm_commit": null,
"peft_version": null,
"peft_commit": null
}
} | {
"overall": {
"memory": {
"unit": "MB",
"max_ram": 1140.764672,
"max_global_vram": 4597.481472,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"count": 5,
"total": 1.0522736740112304,
"mean": 0.21045473480224608,
"stdev": 0.2521626361507063,
"p50": 0.08427519989013672,
"p90": 0.46303764648437507,
"p95": 0.5889081359863281,
"p99": 0.6896045275878907,
"values": [
0.7147786254882813,
0.08542617797851562,
0.08360550689697266,
0.08418816375732421,
0.08427519989013672
]
},
"throughput": {
"unit": "samples/s",
"value": 47.51615595342393
},
"energy": null,
"efficiency": null
},
"warmup": {
"memory": {
"unit": "MB",
"max_ram": 1140.764672,
"max_global_vram": 4597.481472,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"count": 2,
"total": 0.8002048034667969,
"mean": 0.40010240173339845,
"stdev": 0.3146762237548828,
"p50": 0.40010240173339845,
"p90": 0.6518433807373047,
"p95": 0.683311003112793,
"p99": 0.7084851010131836,
"values": [
0.7147786254882813,
0.08542617797851562
]
},
"throughput": {
"unit": "samples/s",
"value": 9.997440611879489
},
"energy": null,
"efficiency": null
},
"train": {
"memory": {
"unit": "MB",
"max_ram": 1140.764672,
"max_global_vram": 4597.481472,
"max_process_vram": 0,
"max_reserved": 3948.937216,
"max_allocated": 3702.95552
},
"latency": {
"unit": "s",
"count": 3,
"total": 0.2520688705444336,
"mean": 0.08402295684814454,
"stdev": 0.0002973125946472148,
"p50": 0.08418816375732421,
"p90": 0.08425779266357422,
"p95": 0.08426649627685547,
"p99": 0.08427345916748047,
"values": [
0.08360550689697266,
0.08418816375732421,
0.08427519989013672
]
},
"throughput": {
"unit": "samples/s",
"value": 71.40905563278207
},
"energy": null,
"efficiency": null
}
} | null | null | null | null | null | null | null | null | null | null |
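To work with rows like these outside the preview, one option is to flatten a nested report dict (overall/warmup/train, each with memory/latency/throughput/energy/efficiency) into a single flat record. A minimal pandas sketch; the file name below is a placeholder assumption, not a path taken from this dataset:

import json
import pandas as pd

# Placeholder path (assumption): a locally saved report JSON with the structure shown above.
with open("benchmark_report.json") as f:
    report = json.load(f)

# Flatten the nested dict into one row with dotted column names,
# roughly the shape of the preview rows above.
row = pd.json_normalize(report, sep=".")
print(row.T)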