fxmarty committed
Commit
ec5ac36
1 Parent(s): 22bda64

Adding regression benchmark for the transformers SHA 11757e2bbd8ef89391ccb9ce0416420e16fa36f9

Files changed (30)
  1. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_gpt2_inference/0/main.log +22 -0
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16195.125247999998,0.0314,31.8,5.92,33.8
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:09:28,811][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:09:28,812][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:09:29,100][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:09:29,101][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:09:29,101][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:09:29,257][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:09:29,270][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:09:29,270][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 15:10:35,676][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:10:35,678][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:10:43,799][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:10:45,078][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-11 15:10:45,079][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-11 15:10:45,079][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:10:45,393][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:11:05,716][inference][INFO] - + Forward pass latency: 3.14e-02 (s)
+ [2023-08-11 15:11:05,718][inference][INFO] - + Forward pass throughput: 31.80 (samples/s)
+ [2023-08-11 15:11:05,718][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:11:12,343][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:11:36,034][inference][INFO] - + Generation pass latency: 5.92e+00 (s)
+ [2023-08-11 15:11:36,038][inference][INFO] - + Generation pass throughput: 33.80 (tokens/s)
+ [2023-08-11 15:11:36,038][inference][INFO] - Saving inference results
+ [2023-08-11 15:11:36,048][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30317.346815999997,0.0644,15.5,5.81,34.4
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:11:36,645][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:11:36,646][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:11:36,834][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:11:36,834][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:11:36,835][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:11:36,939][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:11:36,966][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:11:36,967][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 15:11:53,958][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:11:53,960][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:12:01,812][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:12:01,888][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-11 15:12:01,888][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-11 15:12:01,888][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:12:04,014][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:13:10,218][inference][INFO] - + Forward pass latency: 6.44e-02 (s)
+ [2023-08-11 15:13:10,218][inference][INFO] - + Forward pass throughput: 15.50 (samples/s)
+ [2023-08-11 15:13:10,219][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:13:15,874][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:13:39,114][inference][INFO] - + Generation pass latency: 5.81e+00 (s)
+ [2023-08-11 15:13:39,117][inference][INFO] - + Generation pass throughput: 34.40 (tokens/s)
+ [2023-08-11 15:13:39,118][inference][INFO] - Saving inference results
+ [2023-08-11 15:13:39,123][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16381.771776,0.0319,62.7,6.29,63.6
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:13:39,638][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:13:39,639][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:13:39,890][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:13:39,890][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:13:39,890][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:13:39,983][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:13:40,008][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:13:40,009][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 15:13:50,578][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:13:50,580][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:13:58,467][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:13:58,510][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-11 15:13:58,510][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-11 15:13:58,511][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:13:58,986][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:14:29,160][inference][INFO] - + Forward pass latency: 3.19e-02 (s)
+ [2023-08-11 15:14:29,161][inference][INFO] - + Forward pass throughput: 62.70 (samples/s)
+ [2023-08-11 15:14:29,162][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:14:36,067][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:15:01,224][inference][INFO] - + Generation pass latency: 6.29e+00 (s)
+ [2023-08-11 15:15:01,227][inference][INFO] - + Generation pass throughput: 63.60 (tokens/s)
+ [2023-08-11 15:15:01,227][inference][INFO] - Saving inference results
+ [2023-08-11 15:15:01,233][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30778.720255999997,0.109,18.3,7.05,56.7
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:15:01,685][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:15:01,686][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:15:01,874][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:15:01,874][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:15:01,874][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:15:01,976][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:15:02,001][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:15:02,002][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 15:15:18,818][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:15:18,820][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:15:26,791][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:15:26,917][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+ [2023-08-11 15:15:26,917][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+ [2023-08-11 15:15:26,917][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:15:30,750][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:16:41,026][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-11 15:16:41,027][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-11 15:16:41,027][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:16:48,085][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:17:09,223][inference][INFO] - + Generation pass latency: 7.05e+00 (s)
+ [2023-08-11 15:17:09,227][inference][INFO] - + Generation pass throughput: 56.70 (tokens/s)
+ [2023-08-11 15:17:09,227][inference][INFO] - Saving inference results
+ [2023-08-11 15:17:09,233][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,17000.431615999998,0.0323,124.0,6.32,127.0
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:17:09,780][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:17:09,781][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:17:09,976][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:17:09,977][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:17:09,977][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:17:10,078][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:17:10,103][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:17:10,104][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 15:17:20,642][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:17:20,643][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:17:28,438][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:17:28,484][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+ [2023-08-11 15:17:28,484][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+ [2023-08-11 15:17:28,485][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:17:29,230][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:18:15,927][inference][INFO] - + Forward pass latency: 3.23e-02 (s)
+ [2023-08-11 15:18:15,928][inference][INFO] - + Forward pass throughput: 124.00 (samples/s)
+ [2023-08-11 15:18:15,928][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:18:23,197][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:18:48,477][inference][INFO] - + Generation pass latency: 6.32e+00 (s)
+ [2023-08-11 15:18:48,480][inference][INFO] - + Generation pass throughput: 127.00 (tokens/s)
+ [2023-08-11 15:18:48,480][inference][INFO] - Saving inference results
+ [2023-08-11 15:18:48,491][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,31481.266175999997,0.187,21.4,7.67,104.0
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:18:49,157][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:18:49,158][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:18:49,346][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:18:49,346][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:18:49,347][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:18:49,450][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:18:49,475][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:18:49,476][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 15:19:06,551][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:19:06,552][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:19:14,495][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:19:14,708][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-11 15:19:14,708][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-11 15:19:14,713][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:19:21,639][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:20:36,338][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-11 15:20:36,341][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-11 15:20:36,342][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:20:44,105][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:21:07,125][inference][INFO] - + Generation pass latency: 7.67e+00 (s)
+ [2023-08-11 15:21:07,126][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-11 15:21:07,126][inference][INFO] - Saving inference results
+ [2023-08-11 15:21:07,133][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,19498.139648,0.0982,163.0,7.01,456.0
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:21:07,692][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:21:07,694][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:21:07,881][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:21:07,881][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:21:07,881][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:21:07,982][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:21:08,008][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:21:08,009][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-11 15:21:18,843][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:21:18,845][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:21:26,778][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:21:26,890][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-11 15:21:26,890][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-11 15:21:26,890][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:21:29,513][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:22:23,560][inference][INFO] - + Forward pass latency: 9.82e-02 (s)
+ [2023-08-11 15:22:23,562][inference][INFO] - + Forward pass throughput: 163.00 (samples/s)
+ [2023-08-11 15:22:23,563][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:22:33,602][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:22:54,638][inference][INFO] - + Generation pass latency: 7.01e+00 (s)
+ [2023-08-11 15:22:54,642][inference][INFO] - + Generation pass throughput: 456.00 (tokens/s)
+ [2023-08-11 15:22:54,642][inference][INFO] - Saving inference results
+ [2023-08-11 15:22:54,649][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,35824.467968,0.727,22.0,12.9,248.0
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-11 15:22:55,279][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:22:55,280][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:22:55,474][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-11 15:22:55,474][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:22:55,474][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:22:55,710][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:22:55,735][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:22:55,736][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-11 15:23:12,861][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:23:12,863][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:23:20,811][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:23:21,504][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-11 15:23:21,504][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-11 15:23:21,521][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:23:46,914][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:25:04,221][inference][INFO] - + Forward pass latency: 7.27e-01 (s)
+ [2023-08-11 15:25:04,222][inference][INFO] - + Forward pass throughput: 22.00 (samples/s)
+ [2023-08-11 15:25:04,223][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:25:18,327][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:25:44,215][inference][INFO] - + Generation pass latency: 1.29e+01 (s)
+ [2023-08-11 15:25:44,218][inference][INFO] - + Generation pass throughput: 248.00 (tokens/s)
+ [2023-08-11 15:25:44,218][inference][INFO] - Saving inference results
+ [2023-08-11 15:25:44,224][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,459.923456,0.00316,316.0
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-11 15:25:48,491][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:25:48,492][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:25:48,674][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-11 15:25:48,674][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:25:48,674][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:25:48,674][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:25:48,676][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:25:48,676][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 15:25:49,257][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:25:49,257][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:25:49,384][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 15:25:49,385][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:25:49,435][inference][INFO] - + Forward pass peak memory: 459.923456 (MB)
+ [2023-08-11 15:25:49,436][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-11 15:25:49,438][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:25:49,473][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:25:59,587][inference][INFO] - + Forward pass latency: 3.16e-03 (s)
+ [2023-08-11 15:25:59,589][inference][INFO] - + Forward pass throughput: 316.00 (samples/s)
+ [2023-08-11 15:25:59,589][inference][INFO] - Saving inference results
+ [2023-08-11 15:25:59,601][backend][INFO] - Cleaning backend
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,464.05222399999997,0.00341,293.0,0.482,207.0
raw_results/2023-08-11_14:09:31_11757e2bbd8ef89391ccb9ce0416420e16fa36f9/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-11 15:26:03,364][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-11 15:26:03,364][benchmark][INFO] - + Setting seed(42)
+ [2023-08-11 15:26:03,554][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-11 15:26:03,555][backend][INFO] - Configuring pytorch backend
+ [2023-08-11 15:26:03,555][backend][INFO] - + Checking initial device isolation
+ [2023-08-11 15:26:03,555][backend][INFO] - + Checking contineous device isolation
+ [2023-08-11 15:26:03,557][pytorch][INFO] - + Disabling gradients
+ [2023-08-11 15:26:03,557][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-11 15:26:04,346][pytorch][INFO] - + Turning on eval mode
+ [2023-08-11 15:26:04,346][inference][INFO] - Running inference benchmark
+ [2023-08-11 15:26:04,550][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-11 15:26:04,594][inference][INFO] - + Forward pass peak memory: 464.05222399999997 (MB)
+ [2023-08-11 15:26:04,595][inference][INFO] - + Warming up the forward pass
+ [2023-08-11 15:26:04,631][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-11 15:26:14,741][inference][INFO] - + Forward pass latency: 3.41e-03 (s)
+ [2023-08-11 15:26:14,743][inference][INFO] - + Forward pass throughput: 293.00 (samples/s)
+ [2023-08-11 15:26:14,744][inference][INFO] - + Warming up the generation pass
+ [2023-08-11 15:26:15,235][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-11 15:26:25,351][inference][INFO] - + Generation pass latency: 4.82e-01 (s)
+ [2023-08-11 15:26:25,351][inference][INFO] - + Generation pass throughput: 207.00 (tokens/s)
+ [2023-08-11 15:26:25,352][inference][INFO] - Saving inference results
+ [2023-08-11 15:26:25,364][backend][INFO] - Cleaning backend