fxmarty committed
Commit: 22bda64
Parent: 78936d4

Adding regression benchmark for the transformers SHA 0acf56224bd98cbf0413218f362a3a61805a7844

Files changed (30)
  1. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_gpt2_inference/0/main.log +22 -0
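Each run directory under raw_results/ follows the same layout: <timestamp>_<transformers SHA>/<experiment_name>/<run id>/ with the resolved hydra_config.yaml, the measured inference_results.csv, and the benchmark's main.log. Below is a minimal sketch for collecting these raw results into one table, assuming pandas and PyYAML are available; collect_results is a hypothetical helper for illustration, not something shipped in this commit.

import glob
import os

import pandas as pd
import yaml

def collect_results(root="raw_results"):
    # Hypothetical helper, not part of this commit: join every
    # inference_results.csv with the sweep parameters recorded in its
    # sibling hydra_config.yaml.
    rows = []
    for csv_path in glob.glob(os.path.join(root, "*", "*", "*", "inference_results.csv")):
        run_dir = os.path.dirname(csv_path)
        with open(os.path.join(run_dir, "hydra_config.yaml")) as f:
            config = yaml.safe_load(f)
        df = pd.read_csv(csv_path, index_col=0)
        df["experiment"] = config["experiment_name"]
        df["torch_dtype"] = config["backend"]["torch_dtype"]
        df["batch_size"] = config["benchmark"]["input_shapes"]["batch_size"]
        rows.append(df)
    return pd.concat(rows, ignore_index=True)

Grouping the resulting frame by experiment, torch_dtype and batch_size is enough to compare these numbers against a run for a later transformers SHA.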
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
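Runs 0 through 7 of llama_1gpu_inference share this configuration and sweep only two fields: backend.torch_dtype alternates between float16 (even-numbered runs) and float32 (odd-numbered runs), while benchmark.input_shapes.batch_size steps through 1, 2, 4 and 16.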
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,58202.128383999996,0.0319,31.3,7.12,28.1
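The throughput columns are consistent with the latencies under the relations samples/s = batch_size / forward.latency and tokens/s = batch_size * new_tokens / generate.latency. These relations are an assumption inferred from the numbers, not documented output of the tool; for this run (batch_size 1, new_tokens 200) they check out:

# Sanity check against the CSV above.
batch_size, new_tokens = 1, 200
forward_latency, generate_latency = 0.0319, 7.12
print(round(batch_size / forward_latency, 1))                # 31.3 samples/s
print(round(batch_size * new_tokens / generate_latency, 1))  # 28.1 tokens/s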
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 14:49:41,960][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 14:49:41,960][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 14:49:42,262][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 14:49:42,262][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 14:49:42,262][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 14:49:42,525][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 14:49:42,535][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 14:49:42,536][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 14:50:51,092][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 14:50:51,094][inference][INFO] - Running inference benchmark
+ [2023-08-11 14:50:59,079][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 14:51:01,978][memory_tracker][INFO] - Peak memory usage: 58202.128383999996 MB
+ [2023-08-11 14:51:01,978][inference][INFO] - + Forward pass peak memory: 58202.128383999996 (MB)
+ [2023-08-11 14:51:01,978][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 14:51:02,633][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 14:51:45,045][inference][INFO] - + Forward pass latency: 3.19e-02 (s)
+ [2023-08-11 14:51:45,046][inference][INFO] - + Forward pass throughput: 31.30 (samples/s)
+ [2023-08-11 14:51:45,047][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 14:51:52,813][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 14:52:14,166][inference][INFO] - + Generation pass latency: 7.12e+00 (s)
+ [2023-08-11 14:52:14,169][inference][INFO] - + Generation pass throughput: 28.10 (tokens/s)
+ [2023-08-11 14:52:14,169][inference][INFO] - Saving inference results
+ [2023-08-11 14:52:14,178][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,72324.34995199999,0.144,6.94,10.6,18.9
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 14:52:14,640][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 14:52:14,641][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 14:52:14,843][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 14:52:14,843][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 14:52:14,843][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 14:52:15,066][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 14:52:15,089][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 14:52:15,089][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 14:52:31,638][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 14:52:31,640][inference][INFO] - Running inference benchmark
+ [2023-08-11 14:52:39,294][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 14:52:39,442][memory_tracker][INFO] - Peak memory usage: 72324.34995199999 MB
+ [2023-08-11 14:52:39,442][inference][INFO] - + Forward pass peak memory: 72324.34995199999 (MB)
+ [2023-08-11 14:52:39,443][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 14:52:44,241][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 14:53:51,968][inference][INFO] - + Forward pass latency: 1.44e-01 (s)
+ [2023-08-11 14:53:51,969][inference][INFO] - + Forward pass throughput: 6.94 (samples/s)
+ [2023-08-11 14:53:51,969][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 14:54:02,843][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 14:54:24,101][inference][INFO] - + Generation pass latency: 1.06e+01 (s)
+ [2023-08-11 14:54:24,104][inference][INFO] - + Generation pass throughput: 18.90 (tokens/s)
+ [2023-08-11 14:54:24,104][inference][INFO] - Saving inference results
+ [2023-08-11 14:54:24,110][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,59911.307263999995,0.0412,48.5,7.58,52.8
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 14:54:24,613][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 14:54:24,613][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 14:54:24,802][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 14:54:24,802][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 14:54:24,802][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 14:54:25,025][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 14:54:25,046][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 14:54:25,047][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 14:54:35,688][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 14:54:35,690][inference][INFO] - Running inference benchmark
+ [2023-08-11 14:54:43,378][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 14:54:43,428][memory_tracker][INFO] - Peak memory usage: 59911.307263999995 MB
+ [2023-08-11 14:54:43,428][inference][INFO] - + Forward pass peak memory: 59911.307263999995 (MB)
+ [2023-08-11 14:54:43,428][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 14:54:44,488][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 14:55:34,919][inference][INFO] - + Forward pass latency: 4.12e-02 (s)
+ [2023-08-11 14:55:34,920][inference][INFO] - + Forward pass throughput: 48.50 (samples/s)
+ [2023-08-11 14:55:34,921][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 14:55:43,264][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 14:56:06,008][inference][INFO] - + Generation pass latency: 7.58e+00 (s)
+ [2023-08-11 14:56:06,011][inference][INFO] - + Generation pass throughput: 52.80 (tokens/s)
+ [2023-08-11 14:56:06,012][inference][INFO] - Saving inference results
+ [2023-08-11 14:56:06,017][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30778.720255999997,0.153,13.1,15.3,26.1
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 14:56:06,501][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 14:56:06,502][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 14:56:06,700][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 14:56:06,700][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 14:56:06,700][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 14:56:06,931][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 14:56:06,953][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 14:56:06,953][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 14:56:23,698][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 14:56:23,700][inference][INFO] - Running inference benchmark
+ [2023-08-11 14:56:31,594][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 14:56:31,741][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+ [2023-08-11 14:56:31,742][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+ [2023-08-11 14:56:31,742][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 14:56:35,563][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 14:57:47,315][inference][INFO] - + Forward pass latency: 1.53e-01 (s)
+ [2023-08-11 14:57:47,315][inference][INFO] - + Forward pass throughput: 13.10 (samples/s)
+ [2023-08-11 14:57:47,316][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 14:58:02,645][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 14:58:33,331][inference][INFO] - + Generation pass latency: 1.53e+01 (s)
+ [2023-08-11 14:58:33,334][inference][INFO] - + Generation pass throughput: 26.10 (tokens/s)
+ [2023-08-11 14:58:33,334][inference][INFO] - Saving inference results
+ [2023-08-11 14:58:33,343][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,59007.434751999994,0.0574,69.7,8.3,96.4
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 14:58:33,877][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 14:58:33,878][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 14:58:34,075][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 14:58:34,076][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 14:58:34,076][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 14:58:34,310][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 14:58:34,332][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 14:58:34,333][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 14:58:45,266][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 14:58:45,267][inference][INFO] - Running inference benchmark
+ [2023-08-11 14:58:52,992][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 14:58:53,052][memory_tracker][INFO] - Peak memory usage: 59007.434751999994 MB
+ [2023-08-11 14:58:53,052][inference][INFO] - + Forward pass peak memory: 59007.434751999994 (MB)
+ [2023-08-11 14:58:53,053][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 14:58:54,625][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 14:59:51,099][inference][INFO] - + Forward pass latency: 5.74e-02 (s)
+ [2023-08-11 14:59:51,100][inference][INFO] - + Forward pass throughput: 69.70 (samples/s)
+ [2023-08-11 14:59:51,101][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:00:00,339][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:00:25,244][inference][INFO] - + Generation pass latency: 8.30e+00 (s)
+ [2023-08-11 15:00:25,248][inference][INFO] - + Generation pass throughput: 96.40 (tokens/s)
+ [2023-08-11 15:00:25,248][inference][INFO] - Saving inference results
+ [2023-08-11 15:00:25,253][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,75262.459904,0.402,9.95,16.6,48.2
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:00:25,760][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:00:25,761][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:00:25,950][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:00:25,950][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:00:25,950][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:00:26,184][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:00:26,206][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:00:26,207][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 15:00:42,893][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:00:42,895][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:00:50,667][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:00:51,091][memory_tracker][INFO] - Peak memory usage: 75262.459904 MB
+ [2023-08-11 15:00:51,091][inference][INFO] - + Forward pass peak memory: 75262.459904 (MB)
+ [2023-08-11 15:00:51,100][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:01:06,031][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:02:21,973][inference][INFO] - + Forward pass latency: 4.02e-01 (s)
+ [2023-08-11 15:02:21,974][inference][INFO] - + Forward pass throughput: 9.95 (samples/s)
+ [2023-08-11 15:02:21,975][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:02:38,596][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:03:11,866][inference][INFO] - + Generation pass latency: 1.66e+01 (s)
+ [2023-08-11 15:03:11,870][inference][INFO] - + Generation pass throughput: 48.20 (tokens/s)
+ [2023-08-11 15:03:11,870][inference][INFO] - Saving inference results
+ [2023-08-11 15:03:11,876][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,63279.333375999995,0.12,133.0,6.44,497.0
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:03:12,405][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:03:12,405][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:03:12,613][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:03:12,613][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:03:12,613][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:03:12,841][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:03:12,862][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:03:12,863][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 15:03:23,477][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:03:23,479][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:03:31,158][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:03:31,382][memory_tracker][INFO] - Peak memory usage: 63279.333375999995 MB
+ [2023-08-11 15:03:31,382][inference][INFO] - + Forward pass peak memory: 63279.333375999995 (MB)
+ [2023-08-11 15:03:31,382][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:03:37,045][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:04:31,170][inference][INFO] - + Forward pass latency: 1.20e-01 (s)
+ [2023-08-11 15:04:31,171][inference][INFO] - + Forward pass throughput: 133.00 (samples/s)
+ [2023-08-11 15:04:31,172][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:04:37,857][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:05:03,638][inference][INFO] - + Generation pass latency: 6.44e+00 (s)
+ [2023-08-11 15:05:03,641][inference][INFO] - + Generation pass throughput: 497.00 (tokens/s)
+ [2023-08-11 15:05:03,642][inference][INFO] - Saving inference results
+ [2023-08-11 15:05:03,647][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,35824.467968,0.684,23.4,13.0,246.0
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:05:04,239][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:05:04,240][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:05:04,590][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:05:04,590][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:05:04,590][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:05:04,697][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:05:04,719][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:05:04,720][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 15:05:21,726][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:05:21,728][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:05:29,485][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:05:30,186][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-11 15:05:30,186][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-11 15:05:30,202][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:05:55,550][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:07:13,498][inference][INFO] - + Forward pass latency: 6.84e-01 (s)
+ [2023-08-11 15:07:13,499][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-11 15:07:13,500][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:07:27,210][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:07:53,172][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+ [2023-08-11 15:07:53,175][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+ [2023-08-11 15:07:53,175][inference][INFO] - Saving inference results
+ [2023-08-11 15:07:53,181][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,459.95622399999996,0.00326,307.0
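Note that this text-classification run reports only forward-pass metrics; the generate.latency and generate.throughput columns appear only for the text-generation runs (llama and gpt2).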
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-11 15:07:58,180][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:07:58,181][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:07:58,370][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-11 15:07:58,371][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:07:58,371][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:07:58,371][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:07:58,374][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:07:58,374][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 15:07:58,989][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:07:58,989][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:07:59,113][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 15:07:59,114][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:07:59,167][inference][INFO] - + Forward pass peak memory: 459.95622399999996 (MB)
+ [2023-08-11 15:07:59,168][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 15:07:59,169][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:07:59,200][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:08:09,313][inference][INFO] - + Forward pass latency: 3.26e-03 (s)
+ [2023-08-11 15:08:09,316][inference][INFO] - + Forward pass throughput: 307.00 (samples/s)
+ [2023-08-11 15:08:09,316][inference][INFO] - Saving inference results
+ [2023-08-11 15:08:09,330][backend][INFO] - Cleaning backend
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,464.252928,0.00401,249.0,0.537,186.0
raw_results/2023-08-11_13:41:24_0acf56224bd98cbf0413218f362a3a61805a7844/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-11 15:08:13,953][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:08:13,953][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:08:14,132][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-11 15:08:14,132][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:08:14,132][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:08:14,132][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:08:14,134][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:08:14,134][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 15:08:14,889][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:08:14,890][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:08:15,092][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:08:15,140][inference][INFO] - + Forward pass peak memory: 464.252928 (MB)
+ [2023-08-11 15:08:15,141][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:08:15,174][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:08:25,269][inference][INFO] - + Forward pass latency: 4.01e-03 (s)
+ [2023-08-11 15:08:25,271][inference][INFO] - + Forward pass throughput: 249.00 (samples/s)
+ [2023-08-11 15:08:25,272][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:08:25,871][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:08:36,073][inference][INFO] - + Generation pass latency: 5.37e-01 (s)
+ [2023-08-11 15:08:36,074][inference][INFO] - + Generation pass throughput: 186.00 (tokens/s)
+ [2023-08-11 15:08:36,074][inference][INFO] - Saving inference results
+ [2023-08-11 15:08:36,088][backend][INFO] - Cleaning backend