fxmarty committed on
Commit
bcb4aea
1 Parent(s): 1eaee47

Adding regression benchmark for the transformers SHA 454957c9bb5e8270c384ad46a1bb96928997c249

Files changed (30)
  1. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_gpt2_inference/0/main.log +22 -0
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
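The numbered subdirectories (0 through 7) under llama_1gpu_inference are Hydra multirun outputs: the committed configs differ only in backend.torch_dtype (float16 vs. float32) and benchmark.input_shapes.batch_size (1, 2, 4, 16). A minimal sketch of reading one resolved config back, assuming PyYAML is installed:

    import yaml

    # Path as committed in this repo; run index 0 is the float16 / batch-size-1 run.
    path = ("raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249"
            "/llama_1gpu_inference/0/hydra_config.yaml")

    with open(path) as f:
        config = yaml.safe_load(f)

    # The two axes that vary across the eight llama runs.
    print(config["backend"]["torch_dtype"])                   # float16
    print(config["benchmark"]["input_shapes"]["batch_size"])  # 1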
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,45927.497727999995,0.0328,30.5,6.25,32.0
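The derived columns follow directly from the measured latencies and the configured shapes: forward throughput is batch_size / forward latency, and generation throughput is batch_size * new_tokens / generation latency. A quick check against this run (batch_size: 1, new_tokens: 200):

    batch_size, new_tokens = 1, 200                    # from hydra_config.yaml above
    forward_latency, generate_latency = 0.0328, 6.25   # from inference_results.csv

    print(round(batch_size / forward_latency, 1))             # 30.5 samples/s
    print(round(batch_size * new_tokens / generate_latency))  # 32 tokens/s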
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 10:49:34,655][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 10:49:34,656][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 10:49:34,931][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 10:49:34,931][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 10:49:34,931][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 10:49:35,186][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 10:49:35,196][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 10:49:35,197][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 10:50:43,061][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 10:50:43,062][inference][INFO] - Running inference benchmark
+ [2023-08-11 10:50:50,889][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 10:50:52,192][memory_tracker][INFO] - Peak memory usage: 45927.497727999995 MB
+ [2023-08-11 10:50:52,192][inference][INFO] - + Forward pass peak memory: 45927.497727999995 (MB)
+ [2023-08-11 10:50:52,193][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 10:50:52,522][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 10:51:12,798][inference][INFO] - + Forward pass latency: 3.28e-02 (s)
+ [2023-08-11 10:51:12,798][inference][INFO] - + Forward pass throughput: 30.50 (samples/s)
+ [2023-08-11 10:51:12,799][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 10:51:19,724][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 10:51:44,711][inference][INFO] - + Generation pass latency: 6.25e+00 (s)
+ [2023-08-11 10:51:44,713][inference][INFO] - + Generation pass throughput: 32.00 (tokens/s)
+ [2023-08-11 10:51:44,713][inference][INFO] - Saving inference results
+ [2023-08-11 10:51:44,722][backend][INFO] - Cleaning backend
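Because every run writes a one-row inference_results.csv in the same layout, the results for this transformers SHA can be collected into a single table for regression comparison against other commits. A minimal sketch, assuming pandas and the directory layout committed here:

    from pathlib import Path
    import pandas as pd

    root = Path("raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249")

    frames = []
    for csv_path in sorted(root.glob("*/*/inference_results.csv")):
        df = pd.read_csv(csv_path, index_col=0)
        df["experiment"] = csv_path.parts[-3]  # e.g. llama_1gpu_inference
        df["run"] = csv_path.parts[-2]         # Hydra sweep index: "0", "1", ...
        frames.append(df)

    results = pd.concat(frames, ignore_index=True)
    print(results[["experiment", "run", "forward.latency(s)"]])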
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,60049.719295999996,0.0643,15.6,5.95,33.6
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 10:51:45,188][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 10:51:45,189][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 10:51:45,379][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 10:51:45,379][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 10:51:45,380][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 10:51:45,596][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 10:51:45,621][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 10:51:45,621][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 10:52:01,834][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 10:52:01,836][inference][INFO] - Running inference benchmark
+ [2023-08-11 10:52:09,863][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 10:52:09,938][memory_tracker][INFO] - Peak memory usage: 60049.719295999996 MB
+ [2023-08-11 10:52:09,939][inference][INFO] - + Forward pass peak memory: 60049.719295999996 (MB)
+ [2023-08-11 10:52:09,939][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 10:52:12,068][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 10:53:18,275][inference][INFO] - + Forward pass latency: 6.43e-02 (s)
+ [2023-08-11 10:53:18,276][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+ [2023-08-11 10:53:18,276][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 10:53:24,228][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 10:53:48,012][inference][INFO] - + Generation pass latency: 5.95e+00 (s)
+ [2023-08-11 10:53:48,014][inference][INFO] - + Generation pass throughput: 33.60 (tokens/s)
+ [2023-08-11 10:53:48,014][inference][INFO] - Saving inference results
+ [2023-08-11 10:53:48,020][backend][INFO] - Cleaning backend
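The timestamps above reflect a duration-based measurement: 10 warmup runs, then forward passes repeated for roughly benchmark_duration: 20 seconds, with the mean latency reported. A rough sketch of that pattern (not optimum-benchmark's actual tracker, which also handles CUDA synchronization and memory tracking):

    import time

    def mean_latency(fn, duration_s=20.0, warmup_runs=10):
        # Warm up, then time repeated calls until the duration budget is spent.
        for _ in range(warmup_runs):
            fn()
        latencies = []
        end = time.perf_counter() + duration_s
        while time.perf_counter() < end:
            start = time.perf_counter()
            fn()
            latencies.append(time.perf_counter() - start)
        return sum(latencies) / len(latencies)  # reported as forward pass latency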
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,46114.144256,0.0329,60.8,6.54,61.2
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 10:53:48,503][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 10:53:48,504][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 10:53:48,726][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 10:53:48,726][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 10:53:48,726][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 10:53:48,941][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 10:53:48,962][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 10:53:48,963][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 10:53:59,445][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 10:53:59,447][inference][INFO] - Running inference benchmark
+ [2023-08-11 10:54:07,201][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 10:54:07,242][memory_tracker][INFO] - Peak memory usage: 46114.144256 MB
+ [2023-08-11 10:54:07,242][inference][INFO] - + Forward pass peak memory: 46114.144256 (MB)
+ [2023-08-11 10:54:07,243][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 10:54:07,734][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 10:54:37,015][inference][INFO] - + Forward pass latency: 3.29e-02 (s)
+ [2023-08-11 10:54:37,016][inference][INFO] - + Forward pass throughput: 60.80 (samples/s)
+ [2023-08-11 10:54:37,017][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 10:54:44,275][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 10:55:10,454][inference][INFO] - + Generation pass latency: 6.54e+00 (s)
+ [2023-08-11 10:55:10,457][inference][INFO] - + Generation pass throughput: 61.20 (tokens/s)
+ [2023-08-11 10:55:10,457][inference][INFO] - Saving inference results
+ [2023-08-11 10:55:10,463][backend][INFO] - Cleaning backend
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,60513.189888,0.109,18.3,7.08,56.5
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 10:55:10,909][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 10:55:10,910][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 10:55:11,103][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 10:55:11,104][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 10:55:11,104][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 10:55:11,321][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 10:55:11,342][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 10:55:11,343][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 10:55:27,577][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 10:55:27,579][inference][INFO] - Running inference benchmark
+ [2023-08-11 10:55:35,328][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 10:55:35,453][memory_tracker][INFO] - Peak memory usage: 60513.189888 MB
+ [2023-08-11 10:55:35,453][inference][INFO] - + Forward pass peak memory: 60513.189888 (MB)
+ [2023-08-11 10:55:35,454][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 10:55:39,277][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 10:56:49,906][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-11 10:56:49,907][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-11 10:56:49,907][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 10:56:57,004][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 10:57:18,237][inference][INFO] - + Generation pass latency: 7.08e+00 (s)
+ [2023-08-11 10:57:18,239][inference][INFO] - + Generation pass throughput: 56.50 (tokens/s)
+ [2023-08-11 10:57:18,239][inference][INFO] - Saving inference results
+ [2023-08-11 10:57:18,245][backend][INFO] - Cleaning backend
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,46734.901247999995,0.0331,121.0,6.5,123.0
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 10:57:18,755][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 10:57:18,756][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 10:57:18,953][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 10:57:18,954][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 10:57:18,954][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 10:57:19,171][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 10:57:19,192][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 10:57:19,193][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 10:57:29,675][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 10:57:29,677][inference][INFO] - Running inference benchmark
+ [2023-08-11 10:57:37,424][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 10:57:37,473][memory_tracker][INFO] - Peak memory usage: 46734.901247999995 MB
+ [2023-08-11 10:57:37,474][inference][INFO] - + Forward pass peak memory: 46734.901247999995 (MB)
+ [2023-08-11 10:57:37,474][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 10:57:38,229][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 10:58:23,744][inference][INFO] - + Forward pass latency: 3.31e-02 (s)
+ [2023-08-11 10:58:23,745][inference][INFO] - + Forward pass throughput: 121.00 (samples/s)
+ [2023-08-11 10:58:23,745][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 10:58:31,236][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 10:58:57,221][inference][INFO] - + Generation pass latency: 6.50e+00 (s)
+ [2023-08-11 10:58:57,224][inference][INFO] - + Generation pass throughput: 123.00 (tokens/s)
+ [2023-08-11 10:58:57,224][inference][INFO] - Saving inference results
+ [2023-08-11 10:58:57,236][backend][INFO] - Cleaning backend
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,61213.638655999996,0.187,21.4,7.7,104.0
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 10:58:57,748][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 10:58:57,749][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 10:58:57,941][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 10:58:57,941][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 10:58:57,942][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 10:58:58,158][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 10:58:58,180][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 10:58:58,181][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 10:59:14,616][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 10:59:14,617][inference][INFO] - Running inference benchmark
+ [2023-08-11 10:59:22,390][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 10:59:22,593][memory_tracker][INFO] - Peak memory usage: 61213.638655999996 MB
+ [2023-08-11 10:59:22,593][inference][INFO] - + Forward pass peak memory: 61213.638655999996 (MB)
+ [2023-08-11 10:59:22,597][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 10:59:29,522][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 11:00:44,186][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-11 11:00:44,189][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-11 11:00:44,189][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 11:00:51,996][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 11:01:15,108][inference][INFO] - + Generation pass latency: 7.70e+00 (s)
+ [2023-08-11 11:01:15,111][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-11 11:01:15,111][inference][INFO] - Saving inference results
+ [2023-08-11 11:01:15,117][backend][INFO] - Cleaning backend
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,49230.512127999995,0.0986,162.0,6.72,476.0
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 11:01:15,684][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 11:01:15,684][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 11:01:15,906][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 11:01:15,906][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 11:01:15,906][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 11:01:16,130][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 11:01:16,153][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 11:01:16,153][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 11:01:26,721][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 11:01:26,722][inference][INFO] - Running inference benchmark
+ [2023-08-11 11:01:34,439][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 11:01:34,550][memory_tracker][INFO] - Peak memory usage: 49230.512127999995 MB
+ [2023-08-11 11:01:34,550][inference][INFO] - + Forward pass peak memory: 49230.512127999995 (MB)
+ [2023-08-11 11:01:34,551][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 11:01:37,219][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 11:02:31,212][inference][INFO] - + Forward pass latency: 9.86e-02 (s)
+ [2023-08-11 11:02:31,213][inference][INFO] - + Forward pass throughput: 162.00 (samples/s)
+ [2023-08-11 11:02:31,213][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 11:02:38,191][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 11:02:58,363][inference][INFO] - + Generation pass latency: 6.72e+00 (s)
+ [2023-08-11 11:02:58,365][inference][INFO] - + Generation pass throughput: 476.00 (tokens/s)
+ [2023-08-11 11:02:58,365][inference][INFO] - Saving inference results
+ [2023-08-11 11:02:58,371][backend][INFO] - Cleaning backend
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,65556.840448,0.684,23.4,13.0,246.0
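Taken together, the eight llama runs give a direct float16 vs. float32 comparison at each batch size. Reading the forward latencies out of the four CSV pairs above (an observation on the committed numbers, not part of the benchmark output itself):

    # Forward-pass latencies (s) by batch size: (float16 run, float32 run).
    latencies = {1: (0.0328, 0.0643), 2: (0.0329, 0.109),
                 4: (0.0331, 0.187), 16: (0.0986, 0.684)}

    for batch, (fp16, fp32) in latencies.items():
        print(f"batch {batch:>2}: float16 forward pass is {fp32 / fp16:.1f}x faster")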
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 11:02:58,952][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 11:02:58,952][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 11:02:59,180][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 11:02:59,180][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 11:02:59,180][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 11:02:59,397][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 11:02:59,419][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 11:02:59,420][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 11:03:16,160][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 11:03:16,162][inference][INFO] - Running inference benchmark
+ [2023-08-11 11:03:23,886][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 11:03:24,597][memory_tracker][INFO] - Peak memory usage: 65556.840448 MB
+ [2023-08-11 11:03:24,597][inference][INFO] - + Forward pass peak memory: 65556.840448 (MB)
+ [2023-08-11 11:03:24,614][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 11:03:49,983][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 11:05:07,975][inference][INFO] - + Forward pass latency: 6.84e-01 (s)
+ [2023-08-11 11:05:07,977][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-11 11:05:07,977][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 11:05:22,446][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 11:05:48,409][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+ [2023-08-11 11:05:48,412][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+ [2023-08-11 11:05:48,412][inference][INFO] - Saving inference results
+ [2023-08-11 11:05:48,418][backend][INFO] - Cleaning backend
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,459.984896,0.0036,278.0
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-11 11:05:52,884][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 11:05:52,885][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 11:05:53,076][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-11 11:05:53,076][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 11:05:53,076][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 11:05:53,076][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 11:05:53,078][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 11:05:53,078][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 11:05:53,671][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 11:05:53,672][inference][INFO] - Running inference benchmark
+ [2023-08-11 11:05:53,814][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 11:05:53,815][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 11:05:53,866][inference][INFO] - + Forward pass peak memory: 459.984896 (MB)
+ [2023-08-11 11:05:53,867][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 11:05:53,868][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 11:05:53,905][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 11:06:04,004][inference][INFO] - + Forward pass latency: 3.60e-03 (s)
+ [2023-08-11 11:06:04,006][inference][INFO] - + Forward pass throughput: 278.00 (samples/s)
+ [2023-08-11 11:06:04,006][inference][INFO] - Saving inference results
+ [2023-08-11 11:06:04,018][backend][INFO] - Cleaning backend
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,464.142336,0.00393,254.0,0.526,190.0
raw_results/2023-08-11_09:39:57_454957c9bb5e8270c384ad46a1bb96928997c249/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-11 11:06:07,800][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 11:06:07,800][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 11:06:08,010][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-11 11:06:08,010][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 11:06:08,010][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 11:06:08,010][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 11:06:08,012][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 11:06:08,012][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 11:06:08,674][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 11:06:08,675][inference][INFO] - Running inference benchmark
+ [2023-08-11 11:06:08,873][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 11:06:08,918][inference][INFO] - + Forward pass peak memory: 464.142336 (MB)
+ [2023-08-11 11:06:08,919][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 11:06:08,956][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 11:06:19,047][inference][INFO] - + Forward pass latency: 3.93e-03 (s)
+ [2023-08-11 11:06:19,049][inference][INFO] - + Forward pass throughput: 254.00 (samples/s)
+ [2023-08-11 11:06:19,050][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 11:06:19,640][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 11:06:29,643][inference][INFO] - + Generation pass latency: 5.26e-01 (s)
+ [2023-08-11 11:06:29,644][inference][INFO] - + Generation pass throughput: 190.00 (tokens/s)
+ [2023-08-11 11:06:29,644][inference][INFO] - Saving inference results
+ [2023-08-11 11:06:29,656][backend][INFO] - Cleaning backend