---
hydra: |
|
run: |
|
dir: _benchmark/tmpcnl9kbor/commit=48681e6e5ef08e5af8b3feb5683f044321605f1f/${hydra.job.override_dirname} |
|
sweep: |
|
dir: _benchmark/tmpcnl9kbor/commit=48681e6e5ef08e5af8b3feb5683f044321605f1f |
|
subdir: ${hydra.job.override_dirname} |
|
launcher: |
|
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher |
|
sweeper: |
|
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper |
|
max_batch_size: null |
|
params: null |
|
help: |
|
app_name: ${hydra.job.name} |
|
header: '${hydra.help.app_name} is powered by Hydra. |
|
|
|
' |
|
footer: 'Powered by Hydra (https://hydra.cc) |
|
|
|
Use --hydra-help to view Hydra specific help |
|
|
|
' |
|
template: '${hydra.help.header} |
|
|
|
== Configuration groups == |
|
|
|
Compose your configuration from those groups (group=option) |
|
|
|
|
|
$APP_CONFIG_GROUPS |
|
|
|
|
|
== Config == |
|
|
|
Override anything in the config (foo.bar=value) |
|
|
|
|
|
$CONFIG |
|
|
|
|
|
${hydra.help.footer} |
|
|
|
' |
|
hydra_help: |
|
template: 'Hydra (${hydra.runtime.version}) |
|
|
|
See https://hydra.cc for more info. |
|
|
|
|
|
== Flags == |
|
|
|
$FLAGS_HELP |
|
|
|
|
|
== Configuration groups == |
|
|
|
Compose your configuration from those groups (For example, append hydra/job_logging=disabled |
|
to command line) |
|
|
|
|
|
$HYDRA_CONFIG_GROUPS |
|
|
|
|
|
Use ''--cfg hydra'' to show the Hydra config.
|
|
|
' |
|
hydra_help: ??? |
|
hydra_logging: |
|
version: 1 |
|
root: |
|
level: ERROR |
|
disable_existing_loggers: true |
|
job_logging: |
|
version: 1 |
|
root: |
|
level: ERROR |
|
disable_existing_loggers: true |
|
env: {} |
|
mode: MULTIRUN |
|
searchpath: [] |
|
callbacks: {} |
|
output_subdir: .hydra |
|
overrides: |
|
hydra: |
|
- hydra/job_logging=disabled |
|
- hydra/hydra_logging=disabled |
|
- hydra.sweep.dir=_benchmark/tmpcnl9kbor/commit\=48681e6e5ef08e5af8b3feb5683f044321605f1f |
|
- hydra.run.dir=_benchmark/tmpcnl9kbor/commit\=48681e6e5ef08e5af8b3feb5683f044321605f1f/${hydra.job.override_dirname} |
|
- hydra.mode=MULTIRUN |
|
task: |
|
- backend.model=google/gemma-2b |
|
- backend.cache_implementation=null,static |
|
- backend.torch_compile=false,true |
|
job: |
|
name: cli |
|
chdir: true |
|
override_dirname: backend.cache_implementation=null,static,backend.model=google/gemma-2b,backend.torch_compile=false,true |
|
id: ??? |
|
num: ??? |
|
config_name: generation |
|
env_set: |
|
OVERRIDE_BENCHMARKS: '1' |
|
LOG_LEVEL: WARN |
|
env_copy: [] |
|
config: |
|
override_dirname: |
|
kv_sep: '=' |
|
item_sep: ',' |
|
exclude_keys: [] |
|
runtime: |
|
version: 1.3.2 |
|
version_base: '1.3' |
|
cwd: /transformers |
|
config_sources: |
|
- path: hydra.conf |
|
schema: pkg |
|
provider: hydra |
|
- path: optimum_benchmark |
|
schema: pkg |
|
provider: main |
|
- path: /transformers/benchmark/config |
|
schema: file |
|
provider: command-line |
|
- path: '' |
|
schema: structured |
|
provider: schema |
|
output_dir: ??? |
|
choices: |
|
backend: pytorch |
|
launcher: process |
|
scenario: inference |
|
hydra/env: default |
|
hydra/callbacks: null |
|
hydra/job_logging: disabled |
|
hydra/hydra_logging: disabled |
|
hydra/hydra_help: default |
|
hydra/help: default |
|
hydra/sweeper: basic |
|
hydra/launcher: basic |
|
hydra/output: default |
|
verbose: false |
|
name: pytorch_generate |
|
backend: |
|
name: pytorch |
|
version: 2.3.0+cu121 |
|
_target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend |
|
task: null |
|
library: null |
|
model: google/gemma-2b |
|
processor: null |
|
device: cuda |
|
device_ids: '0' |
|
seed: 42 |
|
inter_op_num_threads: null |
|
intra_op_num_threads: null |
|
model_kwargs: {} |
|
processor_kwargs: {} |
|
hub_kwargs: {} |
|
no_weights: true |
|
device_map: null |
|
torch_dtype: float16 |
|
eval_mode: true |
|
to_bettertransformer: false |
|
low_cpu_mem_usage: null |
|
attn_implementation: null |
|
cache_implementation: static |
|
autocast_enabled: false |
|
autocast_dtype: null |
|
torch_compile: true |
|
torch_compile_target: forward |
|
torch_compile_config: |
|
backend: inductor |
|
mode: reduce-overhead |
|
fullgraph: true |
|
quantization_scheme: null |
|
quantization_config: {} |
|
deepspeed_inference: false |
|
deepspeed_inference_config: {} |
|
peft_type: null |
|
peft_config: {} |
|
scenario: |
|
name: inference |
|
_target_: optimum_benchmark.scenarios.inference.scenario.InferenceScenario |
|
iterations: 2 |
|
duration: 0 |
|
warmup_runs: 10 |
|
input_shapes: |
|
batch_size: 1 |
|
sequence_length: 7 |
|
new_tokens: null |
|
latency: true |
|
memory: true |
|
energy: false |
|
forward_kwargs: {} |
|
generate_kwargs: |
|
max_new_tokens: 128 |
|
min_new_tokens: 128 |
|
do_sample: false |
|
call_kwargs: {} |
|
launcher: |
|
name: process |
|
_target_: optimum_benchmark.launchers.process.launcher.ProcessLauncher |
|
device_isolation: true |
|
device_isolation_action: warn |
|
start_method: spawn |
|
environment: |
|
cpu: ' AMD EPYC 7R32' |
|
cpu_count: 16 |
|
cpu_ram_mb: 66697.29792 |
|
system: Linux |
|
machine: x86_64 |
|
platform: Linux-5.10.216-204.855.amzn2.x86_64-x86_64-with-glibc2.29 |
|
processor: x86_64 |
|
python_version: 3.8.10 |
|
gpu: |
|
- NVIDIA A10G |
|
gpu_count: 1 |
|
gpu_vram_mb: 24146608128 |
|
optimum_benchmark_version: 0.2.1 |
|
optimum_benchmark_commit: null |
|
transformers_version: 4.42.0.dev0 |
|
transformers_commit: 48681e6e5ef08e5af8b3feb5683f044321605f1f |
|
accelerate_version: 0.31.0.dev0 |
|
accelerate_commit: null |
|
diffusers_version: null |
|
diffusers_commit: null |
|
optimum_version: 1.21.0.dev0 |
|
optimum_commit: null |
|
timm_version: 0.9.16 |
|
timm_commit: null |
|
peft_version: 0.11.2.dev0 |
|
peft_commit: null |
|
|