fxmarty committed on
Commit
7f9cc2e
1 Parent(s): e42428a

Adding regression benchmark for the transformers SHA 0ebe7ae16076f727ac40c47f8f9167013c4596d8

Files changed (30)
  1. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_gpt2_inference/0/main.log +22 -0
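The inference_results.csv files above all share the same layout, so a regression comparison across transformers SHAs can be assembled by concatenating them. A minimal sketch, assuming a local checkout of this results repository and pandas (neither is part of the commit itself):

from pathlib import Path

import pandas as pd

# Collect every run's CSV under raw_results/<timestamp>_<sha>/<experiment>/<run>/.
records = []
for csv_path in Path("raw_results").glob("*/*/*/inference_results.csv"):
    df = pd.read_csv(csv_path, index_col=0)
    df["experiment"] = csv_path.parts[-3]  # e.g. llama_1gpu_inference
    df["run"] = csv_path.parts[-2]         # e.g. 0
    records.append(df)

results = pd.concat(records, ignore_index=True)
print(results.sort_values(["experiment", "run"]))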
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
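The remaining seven llama_1gpu_inference configs below differ from this one only in two swept fields: backend.torch_dtype alternates between float16 and float32, and benchmark.input_shapes.batch_size takes the values 1, 2, 4 and 16. A small sketch for extracting the sweep from the YAML files, assuming PyYAML is installed:

from pathlib import Path

import yaml

for cfg_path in sorted(Path("raw_results").glob("*/llama_1gpu_inference/*/hydra_config.yaml")):
    cfg = yaml.safe_load(cfg_path.read_text())
    print(
        cfg_path.parent.name,                            # run index, 0..7
        cfg["backend"]["torch_dtype"],                   # float16 or float32
        cfg["benchmark"]["input_shapes"]["batch_size"],  # 1, 2, 4 or 16
    )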
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16195.125247999998,0.0313,31.9,5.94,33.7
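The throughput columns are consistent with the latency columns and the config above: forward throughput is roughly batch_size / forward latency, and generation throughput is roughly batch_size * new_tokens / generation latency (here 1 * 200 / 5.94 ≈ 33.7 tokens/s). A quick arithmetic check; the relation is inferred from the numbers, not documented in the commit:

# Values from this run's config (batch_size=1, new_tokens=200) and its CSV.
batch_size, new_tokens = 1, 200
forward_latency, generate_latency = 0.0313, 5.94

print(round(batch_size / forward_latency, 1))                # 31.9 samples/s
print(round(batch_size * new_tokens / generate_latency, 1))  # 33.7 tokens/s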
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:25:49,932][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:25:49,933][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:25:50,222][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:25:50,223][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:25:50,223][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:25:50,364][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:25:50,378][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:25:50,379][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:26:57,206][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:26:57,208][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:27:05,901][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:27:07,160][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-13 19:27:07,160][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-13 19:27:07,161][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:27:07,474][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:27:27,814][inference][INFO] - + Forward pass latency: 3.13e-02 (s)
+ [2023-08-13 19:27:27,815][inference][INFO] - + Forward pass throughput: 31.90 (samples/s)
+ [2023-08-13 19:27:27,815][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:27:34,454][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:27:58,214][inference][INFO] - + Generation pass latency: 5.94e+00 (s)
+ [2023-08-13 19:27:58,217][inference][INFO] - + Generation pass throughput: 33.70 (tokens/s)
+ [2023-08-13 19:27:58,217][inference][INFO] - Saving inference results
+ [2023-08-13 19:27:58,227][backend][INFO] - Cleaning backend
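Each main.log records the same measurements as its CSV, so the numbers can also be recovered from the logs alone. A sketch using only the standard library; the regex targets the log lines shown above and is an assumption about their stability across runs:

import re
from pathlib import Path

pattern = re.compile(r"(Forward|Generation) pass (latency|throughput): ([0-9.e+-]+)")
for log_path in sorted(Path("raw_results").glob("*/*/*/main.log")):
    for kind, metric, value in pattern.findall(log_path.read_text()):
        print(log_path.parent, kind, metric, float(value))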
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30317.346815999997,0.0642,15.6,5.59,35.8
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:27:58,705][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:27:58,706][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:27:58,894][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:27:58,895][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:27:58,895][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:27:58,996][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:27:59,036][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:27:59,037][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:28:16,508][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:28:16,509][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:28:25,136][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:28:25,212][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-13 19:28:25,212][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-13 19:28:25,213][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:28:27,335][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:29:33,613][inference][INFO] - + Forward pass latency: 6.42e-02 (s)
+ [2023-08-13 19:29:33,614][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+ [2023-08-13 19:29:33,614][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:29:39,239][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:30:01,591][inference][INFO] - + Generation pass latency: 5.59e+00 (s)
+ [2023-08-13 19:30:01,593][inference][INFO] - + Generation pass throughput: 35.80 (tokens/s)
+ [2023-08-13 19:30:01,594][inference][INFO] - Saving inference results
+ [2023-08-13 19:30:01,601][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16381.771776,0.0309,64.7,6.06,66.0
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:30:02,091][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:30:02,092][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:30:02,290][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:30:02,290][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:30:02,290][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:30:02,393][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:30:02,432][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:30:02,433][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:30:13,617][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:30:13,618][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:30:22,093][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:30:22,132][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-13 19:30:22,132][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-13 19:30:22,132][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:30:22,603][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:30:53,684][inference][INFO] - + Forward pass latency: 3.09e-02 (s)
+ [2023-08-13 19:30:53,685][inference][INFO] - + Forward pass throughput: 64.70 (samples/s)
+ [2023-08-13 19:30:53,686][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:31:00,533][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:31:24,767][inference][INFO] - + Generation pass latency: 6.06e+00 (s)
+ [2023-08-13 19:31:24,769][inference][INFO] - + Generation pass throughput: 66.00 (tokens/s)
+ [2023-08-13 19:31:24,769][inference][INFO] - Saving inference results
+ [2023-08-13 19:31:24,777][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30778.720255999997,0.109,18.3,7.04,56.8
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:31:25,259][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:31:25,260][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:31:25,445][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:31:25,445][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:31:25,445][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:31:25,550][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:31:25,590][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:31:25,591][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:31:43,308][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:31:43,310][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:31:51,835][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:31:51,960][memory_tracker][INFO] - Peak memory usage: 30778.720255999997 MB
+ [2023-08-13 19:31:51,960][inference][INFO] - + Forward pass peak memory: 30778.720255999997 (MB)
+ [2023-08-13 19:31:51,960][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:31:55,776][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:33:06,304][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-13 19:33:06,305][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-13 19:33:06,306][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:33:13,349][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:33:34,460][inference][INFO] - + Generation pass latency: 7.04e+00 (s)
+ [2023-08-13 19:33:34,462][inference][INFO] - + Generation pass throughput: 56.80 (tokens/s)
+ [2023-08-13 19:33:34,462][inference][INFO] - Saving inference results
+ [2023-08-13 19:33:34,470][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,17000.431615999998,0.0311,129.0,6.08,132.0
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:33:34,971][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:33:34,972][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:33:35,153][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:33:35,154][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:33:35,154][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:33:35,258][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:33:35,297][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:33:35,297][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:33:46,533][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:33:46,534][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:33:55,132][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:33:55,178][memory_tracker][INFO] - Peak memory usage: 17000.431615999998 MB
+ [2023-08-13 19:33:55,178][inference][INFO] - + Forward pass peak memory: 17000.431615999998 (MB)
+ [2023-08-13 19:33:55,179][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:33:55,931][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:34:44,356][inference][INFO] - + Forward pass latency: 3.11e-02 (s)
+ [2023-08-13 19:34:44,357][inference][INFO] - + Forward pass throughput: 129.00 (samples/s)
+ [2023-08-13 19:34:44,358][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:34:51,415][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:35:15,755][inference][INFO] - + Generation pass latency: 6.08e+00 (s)
+ [2023-08-13 19:35:15,757][inference][INFO] - + Generation pass throughput: 132.00 (tokens/s)
+ [2023-08-13 19:35:15,757][inference][INFO] - Saving inference results
+ [2023-08-13 19:35:15,772][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,31481.266175999997,0.187,21.4,7.67,104.0
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:35:16,324][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:35:16,325][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:35:16,533][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:35:16,533][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:35:16,533][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:35:16,635][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:35:16,673][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:35:16,674][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:35:34,169][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:35:34,170][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:35:42,819][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:35:43,030][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-13 19:35:43,030][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-13 19:35:43,034][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:35:49,954][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:37:05,276][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-13 19:37:05,277][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-13 19:37:05,278][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:37:13,013][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:37:36,022][inference][INFO] - + Generation pass latency: 7.67e+00 (s)
+ [2023-08-13 19:37:36,022][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-13 19:37:36,022][inference][INFO] - Saving inference results
+ [2023-08-13 19:37:36,029][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,19498.139648,0.0987,162.0,6.29,509.0
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:37:36,593][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:37:36,595][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:37:36,781][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:37:36,781][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:37:36,782][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:37:36,885][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:37:36,925][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:37:36,926][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:37:48,363][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:37:48,365][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:37:56,865][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:37:56,976][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-13 19:37:56,976][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-13 19:37:56,977][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:37:59,631][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:38:53,662][inference][INFO] - + Forward pass latency: 9.87e-02 (s)
+ [2023-08-13 19:38:53,663][inference][INFO] - + Forward pass throughput: 162.00 (samples/s)
+ [2023-08-13 19:38:53,663][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:39:00,168][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:39:25,332][inference][INFO] - + Generation pass latency: 6.29e+00 (s)
+ [2023-08-13 19:39:25,334][inference][INFO] - + Generation pass throughput: 509.00 (tokens/s)
+ [2023-08-13 19:39:25,334][inference][INFO] - Saving inference results
+ [2023-08-13 19:39:25,341][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,35824.467968,0.684,23.4,12.9,248.0
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:39:26,048][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:39:26,049][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:39:26,235][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:39:26,236][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:39:26,236][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:39:26,336][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:39:26,376][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:39:26,377][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:39:44,258][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:39:44,260][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:39:52,830][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:39:53,542][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-13 19:39:53,543][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-13 19:39:53,559][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:40:18,928][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:41:36,950][inference][INFO] - + Forward pass latency: 6.84e-01 (s)
+ [2023-08-13 19:41:36,951][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-13 19:41:36,951][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:41:50,584][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:42:16,481][inference][INFO] - + Generation pass latency: 1.29e+01 (s)
+ [2023-08-13 19:42:16,483][inference][INFO] - + Generation pass throughput: 248.00 (tokens/s)
+ [2023-08-13 19:42:16,483][inference][INFO] - Saving inference results
+ [2023-08-13 19:42:16,490][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,459.374592,0.0037,270.0
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-13 19:42:21,393][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:42:21,395][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:42:21,587][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-13 19:42:21,587][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:42:21,587][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:42:21,588][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:42:21,589][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:42:21,590][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-13 19:42:22,190][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:42:22,191][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:42:22,316][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-13 19:42:22,318][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:42:22,369][inference][INFO] - + Forward pass peak memory: 459.374592 (MB)
+ [2023-08-13 19:42:22,371][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-13 19:42:22,372][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:42:22,415][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:42:32,517][inference][INFO] - + Forward pass latency: 3.70e-03 (s)
+ [2023-08-13 19:42:32,519][inference][INFO] - + Forward pass throughput: 270.00 (samples/s)
+ [2023-08-13 19:42:32,520][inference][INFO] - Saving inference results
+ [2023-08-13 19:42:32,535][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,463.91296,0.00399,251.0,0.504,198.0
raw_results/2023-08-13_17:47:16_0ebe7ae16076f727ac40c47f8f9167013c4596d8/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-13 19:42:37,066][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:42:37,067][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:42:37,252][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-13 19:42:37,252][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:42:37,253][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:42:37,253][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:42:37,256][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:42:37,256][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-13 19:42:37,903][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:42:37,903][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:42:38,112][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:42:38,160][inference][INFO] - + Forward pass peak memory: 463.91296 (MB)
+ [2023-08-13 19:42:38,162][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:42:38,195][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:42:48,286][inference][INFO] - + Forward pass latency: 3.99e-03 (s)
+ [2023-08-13 19:42:48,288][inference][INFO] - + Forward pass throughput: 251.00 (samples/s)
+ [2023-08-13 19:42:48,289][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:42:48,884][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:42:58,957][inference][INFO] - + Generation pass latency: 5.04e-01 (s)
+ [2023-08-13 19:42:58,957][inference][INFO] - + Generation pass throughput: 198.00 (tokens/s)
+ [2023-08-13 19:42:58,958][inference][INFO] - Saving inference results
+ [2023-08-13 19:42:58,973][backend][INFO] - Cleaning backend