fxmarty committed on
Commit
a565b05
1 Parent(s): f6dfa54

Adding regression benchmark for the transformers SHA 0b568291d722aa5e39cd4a8fa05c03200dd280ab

Files changed (42)
  1. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/.config/config.yaml +66 -0
  2. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/.config/hydra.yaml +174 -0
  3. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/.config/overrides.yaml +2 -0
  4. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  5. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/inference_results.csv +2 -0
  6. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/main.log +26 -0
  7. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/.config/config.yaml +66 -0
  8. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/.config/hydra.yaml +174 -0
  9. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/.config/overrides.yaml +2 -0
  10. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  11. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/inference_results.csv +2 -0
  12. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/main.log +26 -0
  13. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/.config/config.yaml +66 -0
  14. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/.config/hydra.yaml +174 -0
  15. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/.config/overrides.yaml +2 -0
  16. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  17. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/inference_results.csv +2 -0
  18. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/main.log +26 -0
  19. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/.config/config.yaml +66 -0
  20. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/.config/hydra.yaml +174 -0
  21. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/.config/overrides.yaml +2 -0
  22. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  23. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/inference_results.csv +2 -0
  24. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/main.log +26 -0
  25. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/.config/config.yaml +66 -0
  26. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/.config/hydra.yaml +172 -0
  27. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/.config/overrides.yaml +1 -0
  28. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/main.log +20 -0
  31. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/.config/config.yaml +66 -0
  32. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/.config/hydra.yaml +172 -0
  33. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/.config/overrides.yaml +1 -0
  34. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/hydra_config.yaml +66 -0
  35. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/inference_results.csv +2 -0
  36. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/main.log +20 -0
  37. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/.config/config.yaml +66 -0
  38. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/.config/hydra.yaml +170 -0
  39. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/.config/overrides.yaml +1 -0
  40. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  41. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  42. raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/main.log +20 -0
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/.config/config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
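Note that disable_grad and eval_mode in the config above are unresolved OmegaConf interpolations; in the resolved hydra_config.yaml further down they appear as true. A minimal sketch of how such a custom resolver could behave, assuming is_inference simply tests the benchmark name (the resolver actually registered by optimum_benchmark may differ):

from omegaconf import OmegaConf

# Hypothetical re-implementation of the "is_inference" resolver referenced
# in config.yaml; the real one ships with optimum_benchmark.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

cfg = OmegaConf.create(
    {
        "benchmark": {"name": "inference"},
        "backend": {
            "disable_grad": "${is_inference:${benchmark.name}}",
            "eval_mode": "${is_inference:${benchmark.name}}",
        },
    }
)
# Interpolations resolve on access, matching hydra_config.yaml below.
print(cfg.backend.disable_grad, cfg.backend.eval_mode)  # True True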
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+  sweep:
+    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params:
+      benchmark.input_shapes.batch_size: 1,16
+      backend.torch_dtype: float16,float32
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: MULTIRUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=MULTIRUN
+    task:
+    - benchmark.input_shapes.batch_size=1
+    - backend.torch_dtype=float16
+  job:
+    name: main
+    chdir: true
+    override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1
+    id: '0'
+    num: 0
+    config_name: llama2_1gpu_inference
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/user/transformers-regression
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /home/user/transformers-regression/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0
+    choices:
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
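The sweeper.params block above is what produces the four numbered job directories (0 through 3) in this experiment. A short sketch, assuming Hydra's basic sweeper simply takes the cartesian product of the comma-separated value lists; the output matches the overrides.yaml files recorded in this commit:

from itertools import product

# The two swept parameters, copied from hydra.sweeper.params above.
params = {
    "benchmark.input_shapes.batch_size": "1,16",
    "backend.torch_dtype": "float16,float32",
}
axes = [[f"{key}={value}" for value in values.split(",")] for key, values in params.items()]
for job_num, overrides in enumerate(product(*axes)):
    print(job_num, list(overrides))
# 0 ['benchmark.input_shapes.batch_size=1', 'backend.torch_dtype=float16']
# 1 ['benchmark.input_shapes.batch_size=1', 'backend.torch_dtype=float32']
# 2 ['benchmark.input_shapes.batch_size=16', 'backend.torch_dtype=float16']
# 3 ['benchmark.input_shapes.batch_size=16', 'backend.torch_dtype=float32']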
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+- benchmark.input_shapes.batch_size=1
+- backend.torch_dtype=float16
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: true
+  eval_mode: true
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+0,15651.96288,0.0312,32.1,5.91,33.8
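The throughput columns follow from the latency columns and this job's input shapes (batch_size=1, new_tokens=200). A quick consistency check, assuming throughput is reported as items processed per second:

# Values copied from inference_results.csv and hydra_config.yaml above.
batch_size = 1
new_tokens = 200
forward_latency_s = 0.0312
generate_latency_s = 5.91

# forward.throughput(samples/s): one batch of samples per forward pass.
print(round(batch_size / forward_latency_s, 1))                # 32.1
# generate.throughput(tokens/s): new_tokens per sequence in the batch.
print(round(batch_size * new_tokens / generate_latency_s, 1))  # 33.8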
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,26 @@
+[2023-08-17 08:23:47,220][benchmark][INFO] - Configuring inference benchmark
+[2023-08-17 08:23:47,221][benchmark][INFO] - + Setting seed(42)
+[2023-08-17 08:23:48,836][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-17 08:23:48,837][backend][INFO] - Configuring pytorch backend
+[2023-08-17 08:23:48,837][backend][INFO] - + Checking initial device isolation
+[2023-08-17 08:23:48,837][utils][INFO] - device_ids to check: {0}
+[2023-08-17 08:23:48,908][utils][INFO] - os.getpid() 601426
+[2023-08-17 08:23:48,909][utils][INFO] - pids_on_device_id set()
+[2023-08-17 08:23:48,909][backend][INFO] - + Checking contineous device isolation
+[2023-08-17 08:23:48,922][pytorch][INFO] - + Disabling gradients
+[2023-08-17 08:23:48,923][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-17 08:25:02,794][pytorch][INFO] - + Turning on eval mode
+[2023-08-17 08:25:02,796][inference][INFO] - Running inference benchmark
+[2023-08-17 08:25:10,806][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-17 08:25:12,068][memory_tracker][INFO] - Peak memory usage: 15651.96288 MB
+[2023-08-17 08:25:12,069][inference][INFO] - + Forward pass peak memory: 15651.96288 (MB)
+[2023-08-17 08:25:12,069][inference][INFO] - + Warming up the forward pass
+[2023-08-17 08:25:12,383][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-17 08:25:27,654][inference][INFO] - + Forward pass latency: 3.12e-02 (s)
+[2023-08-17 08:25:27,655][inference][INFO] - + Forward pass throughput: 32.10 (samples/s)
+[2023-08-17 08:25:27,655][inference][INFO] - + Warming up the generation pass
+[2023-08-17 08:25:34,245][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-17 08:25:51,980][inference][INFO] - + Generation pass latency: 5.91e+00 (s)
+[2023-08-17 08:25:51,982][inference][INFO] - + Generation pass throughput: 33.80 (tokens/s)
+[2023-08-17 08:25:51,982][inference][INFO] - Saving inference results
+[2023-08-17 08:25:51,991][backend][INFO] - Cleaning backend
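The timestamps above are consistent with a duration-bound measurement loop: forward latency tracking runs from 08:25:12 to 08:25:27, roughly the configured benchmark_duration of 15 seconds. A rough sketch of such a loop (not optimum-benchmark's actual implementation), assuming the reported latency is the mean over however many calls fit in the window:

import time

def measure_latency(fn, duration_s=15.0, warmup_runs=10):
    # Warm up first, as in the "Warming up the forward pass" log step.
    for _ in range(warmup_runs):
        fn()
    # Then time as many calls as fit in the measurement window.
    latencies = []
    window_start = time.perf_counter()
    while time.perf_counter() - window_start < duration_s:
        start = time.perf_counter()
        fn()
        latencies.append(time.perf_counter() - start)
    return sum(latencies) / len(latencies)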
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/.config/config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float32
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+  sweep:
+    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params:
+      benchmark.input_shapes.batch_size: 1,16
+      backend.torch_dtype: float16,float32
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: MULTIRUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=MULTIRUN
+    task:
+    - benchmark.input_shapes.batch_size=1
+    - backend.torch_dtype=float32
+  job:
+    name: main
+    chdir: true
+    override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1
+    id: '1'
+    num: 1
+    config_name: llama2_1gpu_inference
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/user/transformers-regression
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /home/user/transformers-regression/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1
+    choices:
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+- benchmark.input_shapes.batch_size=1
+- backend.torch_dtype=float32
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float32
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: true
+  eval_mode: true
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+0,29317.005311999998,0.0641,15.6,5.55,36.0
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,26 @@
+[2023-08-17 08:25:52,563][benchmark][INFO] - Configuring inference benchmark
+[2023-08-17 08:25:52,564][benchmark][INFO] - + Setting seed(42)
+[2023-08-17 08:25:53,065][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-17 08:25:53,066][backend][INFO] - Configuring pytorch backend
+[2023-08-17 08:25:53,066][backend][INFO] - + Checking initial device isolation
+[2023-08-17 08:25:53,066][utils][INFO] - device_ids to check: {0}
+[2023-08-17 08:25:53,162][utils][INFO] - os.getpid() 601426
+[2023-08-17 08:25:53,162][utils][INFO] - pids_on_device_id {601426}
+[2023-08-17 08:25:53,162][backend][INFO] - + Checking contineous device isolation
+[2023-08-17 08:25:53,184][pytorch][INFO] - + Disabling gradients
+[2023-08-17 08:25:53,185][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-17 08:26:10,010][pytorch][INFO] - + Turning on eval mode
+[2023-08-17 08:26:10,011][inference][INFO] - Running inference benchmark
+[2023-08-17 08:26:17,940][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-17 08:26:18,016][memory_tracker][INFO] - Peak memory usage: 29317.005311999998 MB
+[2023-08-17 08:26:18,016][inference][INFO] - + Forward pass peak memory: 29317.005311999998 (MB)
+[2023-08-17 08:26:18,017][inference][INFO] - + Warming up the forward pass
+[2023-08-17 08:26:20,149][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-17 08:27:10,046][inference][INFO] - + Forward pass latency: 6.41e-02 (s)
+[2023-08-17 08:27:10,046][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+[2023-08-17 08:27:10,047][inference][INFO] - + Warming up the generation pass
+[2023-08-17 08:27:15,624][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-17 08:27:32,282][inference][INFO] - + Generation pass latency: 5.55e+00 (s)
+[2023-08-17 08:27:32,283][inference][INFO] - + Generation pass throughput: 36.00 (tokens/s)
+[2023-08-17 08:27:32,283][inference][INFO] - Saving inference results
+[2023-08-17 08:27:32,290][backend][INFO] - Cleaning backend
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/.config/config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 16
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+  sweep:
+    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params:
+      benchmark.input_shapes.batch_size: 1,16
+      backend.torch_dtype: float16,float32
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: MULTIRUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=MULTIRUN
+    task:
+    - benchmark.input_shapes.batch_size=16
+    - backend.torch_dtype=float16
+  job:
+    name: main
+    chdir: true
+    override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16
+    id: '2'
+    num: 2
+    config_name: llama2_1gpu_inference
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/user/transformers-regression
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /home/user/transformers-regression/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2
+    choices:
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+- benchmark.input_shapes.batch_size=16
+- backend.torch_dtype=float16
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float16
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: true
+  eval_mode: true
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 16
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+0,18841.731072,0.0969,165.0,6.24,513.0
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,26 @@
+[2023-08-17 08:27:32,864][benchmark][INFO] - Configuring inference benchmark
+[2023-08-17 08:27:32,864][benchmark][INFO] - + Setting seed(42)
+[2023-08-17 08:27:33,719][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-17 08:27:33,719][backend][INFO] - Configuring pytorch backend
+[2023-08-17 08:27:33,719][backend][INFO] - + Checking initial device isolation
+[2023-08-17 08:27:33,719][utils][INFO] - device_ids to check: {0}
+[2023-08-17 08:27:33,816][utils][INFO] - os.getpid() 601426
+[2023-08-17 08:27:33,817][utils][INFO] - pids_on_device_id {601426}
+[2023-08-17 08:27:33,817][backend][INFO] - + Checking contineous device isolation
+[2023-08-17 08:27:33,838][pytorch][INFO] - + Disabling gradients
+[2023-08-17 08:27:33,839][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+[2023-08-17 08:27:44,695][pytorch][INFO] - + Turning on eval mode
+[2023-08-17 08:27:44,697][inference][INFO] - Running inference benchmark
+[2023-08-17 08:27:52,490][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-17 08:27:52,601][memory_tracker][INFO] - Peak memory usage: 18841.731072 MB
+[2023-08-17 08:27:52,601][inference][INFO] - + Forward pass peak memory: 18841.731072 (MB)
+[2023-08-17 08:27:52,602][inference][INFO] - + Warming up the forward pass
+[2023-08-17 08:27:55,210][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-17 08:28:35,765][inference][INFO] - + Forward pass latency: 9.69e-02 (s)
+[2023-08-17 08:28:35,766][inference][INFO] - + Forward pass throughput: 165.00 (samples/s)
+[2023-08-17 08:28:35,767][inference][INFO] - + Warming up the generation pass
+[2023-08-17 08:28:42,959][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-17 08:29:01,691][inference][INFO] - + Generation pass latency: 6.24e+00 (s)
+[2023-08-17 08:29:01,692][inference][INFO] - + Generation pass throughput: 513.00 (tokens/s)
+[2023-08-17 08:29:01,692][inference][INFO] - Saving inference results
+[2023-08-17 08:29:01,699][backend][INFO] - Cleaning backend
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/.config/config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float32
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 16
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: ${infer_task:${model}, ${hub_kwargs.revision}}
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+hydra:
+  run:
+    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+  sweep:
+    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params:
+      benchmark.input_shapes.batch_size: 1,16
+      backend.torch_dtype: float16,float32
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+      colorlog:
+        (): colorlog.ColoredFormatter
+        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+          - %(message)s'
+        log_colors:
+          DEBUG: purple
+          INFO: green
+          WARNING: yellow
+          ERROR: red
+          CRITICAL: red
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: colorlog
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: MULTIRUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=MULTIRUN
+    task:
+    - benchmark.input_shapes.batch_size=16
+    - backend.torch_dtype=float32
+  job:
+    name: main
+    chdir: true
+    override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16
+    id: '3'
+    num: 3
+    config_name: llama2_1gpu_inference
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/user/transformers-regression
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: optimum_benchmark
+      schema: pkg
+      provider: main
+    - path: hydra_plugins.hydra_colorlog.conf
+      schema: pkg
+      provider: hydra-colorlog
+    - path: /home/user/transformers-regression/configs
+      schema: file
+      provider: command-line
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/user/transformers-regression/sweeps/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3
+    choices:
+      benchmark: inference
+      backend: pytorch
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: colorlog
+      hydra/hydra_logging: colorlog
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+- benchmark.input_shapes.batch_size=16
+- backend.torch_dtype=float32
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: float32
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: true
+  eval_mode: true
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 16
+    sequence_length: 200
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 200
+experiment_name: llama_1gpu_inference
+model: daryl149/llama-2-7b-chat-hf
+device: cuda
+task: text-generation
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+0,34801.057792,0.683,23.4,12.9,248.0
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,26 @@
+[2023-08-17 08:29:02,345][benchmark][INFO] - Configuring inference benchmark
+[2023-08-17 08:29:02,346][benchmark][INFO] - + Setting seed(42)
+[2023-08-17 08:29:02,843][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+[2023-08-17 08:29:02,843][backend][INFO] - Configuring pytorch backend
+[2023-08-17 08:29:02,843][backend][INFO] - + Checking initial device isolation
+[2023-08-17 08:29:02,844][utils][INFO] - device_ids to check: {0}
+[2023-08-17 08:29:02,942][utils][INFO] - os.getpid() 601426
+[2023-08-17 08:29:02,942][utils][INFO] - pids_on_device_id {601426}
+[2023-08-17 08:29:02,943][backend][INFO] - + Checking contineous device isolation
+[2023-08-17 08:29:02,964][pytorch][INFO] - + Disabling gradients
+[2023-08-17 08:29:02,965][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+[2023-08-17 08:29:19,467][pytorch][INFO] - + Turning on eval mode
+[2023-08-17 08:29:19,469][inference][INFO] - Running inference benchmark
+[2023-08-17 08:29:27,471][inference][INFO] - + Tracking forward pass peak memory
+[2023-08-17 08:29:28,181][memory_tracker][INFO] - Peak memory usage: 34801.057792 MB
+[2023-08-17 08:29:28,181][inference][INFO] - + Forward pass peak memory: 34801.057792 (MB)
+[2023-08-17 08:29:28,197][inference][INFO] - + Warming up the forward pass
+[2023-08-17 08:29:53,507][inference][INFO] - + Tracking forward pass latency and throughput
+[2023-08-17 08:30:51,092][inference][INFO] - + Forward pass latency: 6.83e-01 (s)
+[2023-08-17 08:30:51,093][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+[2023-08-17 08:30:51,094][inference][INFO] - + Warming up the generation pass
+[2023-08-17 08:31:04,233][inference][INFO] - + Tracking generation latency and throughput
+[2023-08-17 08:31:30,058][inference][INFO] - + Generation pass latency: 1.29e+01 (s)
+[2023-08-17 08:31:30,061][inference][INFO] - + Generation pass throughput: 248.00 (tokens/s)
+[2023-08-17 08:31:30,061][inference][INFO] - Saving inference results
+[2023-08-17 08:31:30,068][backend][INFO] - Cleaning backend
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/.config/config.yaml ADDED
@@ -0,0 +1,66 @@
+backend:
+  name: pytorch
+  version: 2.0.1+cu117
+  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+  inter_op_num_threads: null
+  intra_op_num_threads: null
+  initial_isolation_check: true
+  continous_isolation_check: true
+  delete_cache: false
+  no_weights: false
+  torch_dtype: null
+  device_map: null
+  load_in_8bit: false
+  load_in_4bit: false
+  bettertransformer: false
+  torch_compile: false
+  torch_compile_config:
+    fullgraph: false
+    dynamic: false
+    backend: inductor
+    mode: null
+    options: null
+    disable: false
+  amp_autocast: false
+  amp_dtype: null
+  disable_grad: ${is_inference:${benchmark.name}}
+  eval_mode: ${is_inference:${benchmark.name}}
+benchmark:
+  name: inference
+  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+  seed: 42
+  memory: true
+  warmup_runs: 10
+  benchmark_duration: 15
+  input_shapes:
+    batch_size: 1
+    sequence_length: 16
+    num_choices: 1
+    width: 64
+    height: 64
+    num_channels: 3
+    point_batch_size: 3
+    nb_points_per_image: 2
+    feature_size: 80
+    nb_max_frames: 3000
+    audio_sequence_length: 16000
+  new_tokens: 100
+experiment_name: pytorch_bert_inference
+model: hf-internal-testing/tiny-random-bert
+device: cpu
+task: text-classification
+hub_kwargs:
+  revision: main
+  cache_dir: null
+  force_download: false
+  local_files_only: false
+environment:
+  optimum_version: 1.11.1
+  transformers_version: 4.32.0.dev0
+  accelerate_version: 0.21.0
+  diffusers_version: null
+  python_version: 3.10.12
+  system: Linux
+  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+  cpu_count: 96
+  cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/.config/hydra.yaml ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params:
+       benchmark.input_shapes.batch_size: 1,4
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task:
+     - benchmark.input_shapes.batch_size=1
+   job:
+     name: main
+     chdir: true
+     override_dirname: benchmark.input_shapes.batch_size=1
+     id: '0'
+     num: 0
+     config_name: bert_cpu_inference
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /home/user/transformers-regression/sweeps/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0
+     choices:
+       benchmark: inference
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
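The sweeper block above is what fans this experiment out into subdirectories `0` and `1`: `benchmark.input_shapes.batch_size: 1,4` is a comma-separated sweep over two values, executed with `hydra.mode=MULTIRUN`. A sketch of replaying a single job of that sweep with Hydra's compose API, assuming a local checkout laid out as recorded here (a `configs/` directory containing `bert_cpu_inference.yaml`):

```python
# Sketch, not the recorded invocation: the runs themselves were launched as a
# Hydra multirun (hydra.mode=MULTIRUN), as the overrides section shows.
from hydra import compose, initialize

with initialize(version_base="1.3", config_path="configs"):
    cfg = compose(
        config_name="bert_cpu_inference",
        overrides=["benchmark.input_shapes.batch_size=1"],  # subdir 0; subdir 1 used 4
    )
print(cfg.benchmark.input_shapes.batch_size)
```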
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/.config/overrides.yaml ADDED
@@ -0,0 +1 @@
+ - benchmark.input_shapes.batch_size=1
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 15
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 1
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+     new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.1
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,466.591744,0.00311,322.0
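A reading aid for the columns above: the reported throughput is consistent with `batch_size / forward.latency(s)`, rounded to three significant figures (the rounding convention is inferred from the numbers themselves, not documented in these files):

```python
# Sanity check on the CSVs, not the benchmark's exact code.
print(1 / 0.00311)  # ~321.5 -> reported as 322.0 samples/s (batch_size=1, subdir 0)
print(4 / 0.00343)  # ~1166.2 -> reported as 1170.0 samples/s (batch_size=4, subdir 1)
```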
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-17 08:22:40,089][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-17 08:22:40,090][benchmark][INFO] - + Setting seed(42)
+ [2023-08-17 08:22:41,460][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-17 08:22:41,460][backend][INFO] - Configuring pytorch backend
+ [2023-08-17 08:22:41,461][backend][INFO] - + Checking initial device isolation
+ [2023-08-17 08:22:41,461][backend][INFO] - + Checking contineous device isolation
+ [2023-08-17 08:22:41,461][pytorch][INFO] - + Disabling gradients
+ [2023-08-17 08:22:41,461][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-17 08:22:42,093][pytorch][INFO] - + Turning on eval mode
+ [2023-08-17 08:22:42,094][inference][INFO] - Running inference benchmark
+ [2023-08-17 08:22:42,218][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-17 08:22:42,219][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-17 08:22:42,278][inference][INFO] - + Forward pass peak memory: 466.591744 (MB)
+ [2023-08-17 08:22:42,279][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-17 08:22:42,281][inference][INFO] - + Warming up the forward pass
+ [2023-08-17 08:22:42,322][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-17 08:22:57,508][inference][INFO] - + Forward pass latency: 3.11e-03 (s)
+ [2023-08-17 08:22:57,511][inference][INFO] - + Forward pass throughput: 322.00 (samples/s)
+ [2023-08-17 08:22:57,511][inference][INFO] - Saving inference results
+ [2023-08-17 08:22:57,528][backend][INFO] - Cleaning backend
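The log records the measurement pattern: dummy inputs are generated, the forward pass is warmed up for `warmup_runs: 10`, then forward passes are timed for the configured `benchmark_duration: 15` seconds. A minimal sketch of that pattern, as an illustration only (optimum_benchmark's actual implementation differs, e.g. it builds dummy inputs from the task and also tracks peak memory):

```python
# Illustrative sketch of warmup + fixed-duration timing, not the real code.
import time
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "hf-internal-testing/tiny-random-bert"
model = AutoModelForSequenceClassification.from_pretrained(model_id).eval()
tokenizer = AutoTokenizer.from_pretrained(model_id)
# batch_size=1, sequence_length=16, matching input_shapes above.
inputs = tokenizer(["hello"], padding="max_length", max_length=16,
                   truncation=True, return_tensors="pt")

with torch.no_grad():          # disable_grad: true
    for _ in range(10):        # warmup_runs: 10
        model(**inputs)
    latencies, start = [], time.perf_counter()
    while time.perf_counter() - start < 15.0:  # benchmark_duration: 15
        t0 = time.perf_counter()
        model(**inputs)
        latencies.append(time.perf_counter() - t0)

mean_latency = sum(latencies) / len(latencies)
print(f"mean forward latency: {mean_latency:.2e} s, "
      f"throughput: {1 / mean_latency:.0f} samples/s")
```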
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/.config/config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: ${is_inference:${benchmark.name}}
+   eval_mode: ${is_inference:${benchmark.name}}
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 15
+   input_shapes:
+     batch_size: 4
+     sequence_length: 16
+     num_choices: 1
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+     new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.1
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/.config/hydra.yaml ADDED
@@ -0,0 +1,172 @@
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params:
+       benchmark.input_shapes.batch_size: 1,4
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task:
+     - benchmark.input_shapes.batch_size=4
+   job:
+     name: main
+     chdir: true
+     override_dirname: benchmark.input_shapes.batch_size=4
+     id: '1'
+     num: 1
+     config_name: bert_cpu_inference
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /home/user/transformers-regression/sweeps/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1
+     choices:
+       benchmark: inference
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/.config/overrides.yaml ADDED
@@ -0,0 +1 @@
+ - benchmark.input_shapes.batch_size=4
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 15
+   input_shapes:
+     batch_size: 4
+     sequence_length: 16
+     num_choices: 1
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+     new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.1
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,467.91475199999996,0.00343,1170.0
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_bert_inference/1/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-17 08:22:57,918][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-17 08:22:57,919][benchmark][INFO] - + Setting seed(42)
+ [2023-08-17 08:22:58,394][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-17 08:22:58,395][backend][INFO] - Configuring pytorch backend
+ [2023-08-17 08:22:58,395][backend][INFO] - + Checking initial device isolation
+ [2023-08-17 08:22:58,395][backend][INFO] - + Checking contineous device isolation
+ [2023-08-17 08:22:58,395][pytorch][INFO] - + Disabling gradients
+ [2023-08-17 08:22:58,395][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-17 08:22:58,532][pytorch][INFO] - + Turning on eval mode
+ [2023-08-17 08:22:58,532][inference][INFO] - Running inference benchmark
+ [2023-08-17 08:22:58,658][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-17 08:22:58,660][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-17 08:22:58,704][inference][INFO] - + Forward pass peak memory: 467.91475199999996 (MB)
+ [2023-08-17 08:22:58,705][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-17 08:22:58,706][inference][INFO] - + Warming up the forward pass
+ [2023-08-17 08:22:58,742][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-17 08:23:13,906][inference][INFO] - + Forward pass latency: 3.43e-03 (s)
+ [2023-08-17 08:23:13,908][inference][INFO] - + Forward pass throughput: 1170.00 (samples/s)
+ [2023-08-17 08:23:13,909][inference][INFO] - Saving inference results
+ [2023-08-17 08:23:13,920][backend][INFO] - Cleaning backend
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/.config/config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: ${is_inference:${benchmark.name}}
+   eval_mode: ${is_inference:${benchmark.name}}
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: false
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 1
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+     new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.1
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/.config/hydra.yaml ADDED
@@ -0,0 +1,170 @@
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task: []
+   job:
+     name: main
+     chdir: true
+     override_dirname: ''
+     id: '0'
+     num: 0
+     config_name: gpt2_cpu_inference
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /home/user/transformers-regression/sweeps/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0
+     choices:
+       benchmark: inference
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/.config/overrides.yaml ADDED
@@ -0,0 +1 @@
+ []
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: false
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 1
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+     new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.1
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,0.00351,285.0,0.523,191.0
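Reading aid for the generation columns: with `new_tokens: 100` pinned in the config above, `generate.throughput(tokens/s)` is consistent with `new_tokens / generate.latency(s)`, again to three significant figures (the rounding convention is an inference from the numbers, not documented here):

```python
# Sanity check on the generation CSV, not the benchmark's exact code.
print(1 / 0.00351)  # ~284.9 -> forward.throughput reported as 285.0 samples/s
print(100 / 0.523)  # ~191.2 -> generate.throughput reported as 191.0 tokens/s
```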
raw_results/2023-08-16_10:49:29_0b568291d722aa5e39cd4a8fa05c03200dd280ab/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-17 08:23:18,740][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-17 08:23:18,741][benchmark][INFO] - + Setting seed(42)
+ [2023-08-17 08:23:20,207][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-17 08:23:20,208][backend][INFO] - Configuring pytorch backend
+ [2023-08-17 08:23:20,208][backend][INFO] - + Checking initial device isolation
+ [2023-08-17 08:23:20,208][backend][INFO] - + Checking contineous device isolation
+ [2023-08-17 08:23:20,208][pytorch][INFO] - + Disabling gradients
+ [2023-08-17 08:23:20,209][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-17 08:23:20,868][pytorch][INFO] - + Turning on eval mode
+ [2023-08-17 08:23:20,868][inference][INFO] - Running inference benchmark
+ [2023-08-17 08:23:21,077][inference][INFO] - + Warming up the forward pass
+ [2023-08-17 08:23:21,115][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-17 08:23:31,218][inference][INFO] - + Forward pass latency: 3.51e-03 (s)
+ [2023-08-17 08:23:31,220][inference][INFO] - + Forward pass throughput: 285.00 (samples/s)
+ [2023-08-17 08:23:31,221][inference][INFO] - + Warming up the generation pass
+ [2023-08-17 08:23:31,717][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-17 08:23:42,178][inference][INFO] - + Generation pass latency: 5.23e-01 (s)
+ [2023-08-17 08:23:42,179][inference][INFO] - + Generation pass throughput: 191.00 (tokens/s)
+ [2023-08-17 08:23:42,179][inference][INFO] - Saving inference results
+ [2023-08-17 08:23:42,192][backend][INFO] - Cleaning backend