fxmarty committed
Commit 86c199f
1 Parent(s): 56165f1

Adding regression benchmark for the transformers SHA 892f9ea0db18d1bef81ea45221f53745c03509f5

Files changed (30)
  1. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/0/hydra_config.yaml +66 -0
  2. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/0/inference_results.csv +2 -0
  3. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/0/main.log +23 -0
  4. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/1/hydra_config.yaml +66 -0
  5. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/1/inference_results.csv +2 -0
  6. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/1/main.log +23 -0
  7. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/2/hydra_config.yaml +66 -0
  8. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/2/inference_results.csv +2 -0
  9. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/2/main.log +23 -0
  10. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/3/hydra_config.yaml +66 -0
  11. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/3/inference_results.csv +2 -0
  12. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/3/main.log +23 -0
  13. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/4/hydra_config.yaml +66 -0
  14. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/4/inference_results.csv +2 -0
  15. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/4/main.log +23 -0
  16. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/5/hydra_config.yaml +66 -0
  17. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/5/inference_results.csv +2 -0
  18. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/5/main.log +23 -0
  19. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/6/hydra_config.yaml +66 -0
  20. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/6/inference_results.csv +2 -0
  21. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/6/main.log +23 -0
  22. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/7/hydra_config.yaml +66 -0
  23. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/7/inference_results.csv +2 -0
  24. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/7/main.log +23 -0
  25. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_bert_inference/0/hydra_config.yaml +66 -0
  26. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_bert_inference/0/inference_results.csv +2 -0
  27. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_bert_inference/0/main.log +20 -0
  28. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_gpt2_inference/0/hydra_config.yaml +66 -0
  29. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_gpt2_inference/0/inference_results.csv +2 -0
  30. raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_gpt2_inference/0/main.log +22 -0
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
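Note: the eight llama_1gpu_inference runs share the Hydra config above and differ only in backend.torch_dtype (float16 vs. float32) and benchmark.input_shapes.batch_size (1, 2, 4, 16). A minimal sketch, assuming a local checkout of this dataset and PyYAML installed (nothing below is part of the commit itself), of pulling those two knobs out of each run:

import yaml  # pip install pyyaml

# Directory layout taken from this commit.
root = "raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5"

for run in range(8):
    with open(f"{root}/llama_1gpu_inference/{run}/hydra_config.yaml") as f:
        cfg = yaml.safe_load(f)
    dtype = cfg["backend"]["torch_dtype"]                    # float16 or float32
    batch = cfg["benchmark"]["input_shapes"]["batch_size"]   # 1, 2, 4 or 16
    print(f"run {run}: torch_dtype={dtype}, batch_size={batch}")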
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16195.125247999998,0.0309,32.4,5.89,34.0
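The throughput columns are internally consistent with the latencies and the config: forward.throughput ≈ batch_size / forward.latency and generate.throughput ≈ batch_size * new_tokens / generate.latency (here 1 / 0.0309 ≈ 32.4 samples/s and 1 * 200 / 5.89 ≈ 34.0 tokens/s). A small sanity-check sketch with pandas; the check_run helper is illustrative, not from the commit:

import pandas as pd

def check_run(csv_path: str, batch_size: int, new_tokens: int) -> None:
    # Each inference_results.csv holds a single row of aggregated metrics.
    row = pd.read_csv(csv_path, index_col=0).iloc[0]
    print("forward samples/s:", row["forward.throughput(samples/s)"],
          "~", round(batch_size / row["forward.latency(s)"], 1))
    print("generate tokens/s:", row["generate.throughput(tokens/s)"],
          "~", round(batch_size * new_tokens / row["generate.latency(s)"], 1))

check_run(
    "raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5"
    "/llama_1gpu_inference/0/inference_results.csv",
    batch_size=1,
    new_tokens=200,
)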
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/0/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 18:49:34,478][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 18:49:34,480][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 18:49:34,805][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 18:49:34,805][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 18:49:34,805][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 18:49:34,946][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 18:49:34,963][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 18:49:34,964][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 18:50:48,347][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 18:50:48,349][inference][INFO] - Running inference benchmark
+ [2023-08-13 18:50:57,041][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 18:50:58,294][memory_tracker][INFO] - Peak memory usage: 16195.125247999998 MB
+ [2023-08-13 18:50:58,294][inference][INFO] - + Forward pass peak memory: 16195.125247999998 (MB)
+ [2023-08-13 18:50:58,294][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 18:50:58,605][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 18:51:18,958][inference][INFO] - + Forward pass latency: 3.09e-02 (s)
+ [2023-08-13 18:51:18,959][inference][INFO] - + Forward pass throughput: 32.40 (samples/s)
+ [2023-08-13 18:51:18,959][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 18:51:25,535][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 18:51:49,084][inference][INFO] - + Generation pass latency: 5.89e+00 (s)
+ [2023-08-13 18:51:49,086][inference][INFO] - + Generation pass throughput: 34.00 (tokens/s)
+ [2023-08-13 18:51:49,086][inference][INFO] - Saving inference results
+ [2023-08-13 18:51:49,096][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30317.346815999997,0.0642,15.6,5.58,35.8
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/1/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 18:51:49,565][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 18:51:49,566][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 18:51:49,758][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 18:51:49,758][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 18:51:49,759][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 18:51:49,863][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 18:51:49,904][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 18:51:49,905][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 18:52:07,694][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 18:52:07,696][inference][INFO] - Running inference benchmark
+ [2023-08-13 18:52:16,365][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 18:52:16,441][memory_tracker][INFO] - Peak memory usage: 30317.346815999997 MB
+ [2023-08-13 18:52:16,441][inference][INFO] - + Forward pass peak memory: 30317.346815999997 (MB)
+ [2023-08-13 18:52:16,442][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 18:52:18,574][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 18:53:24,911][inference][INFO] - + Forward pass latency: 6.42e-02 (s)
+ [2023-08-13 18:53:24,912][inference][INFO] - + Forward pass throughput: 15.60 (samples/s)
+ [2023-08-13 18:53:24,912][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 18:53:30,520][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 18:53:52,855][inference][INFO] - + Generation pass latency: 5.58e+00 (s)
+ [2023-08-13 18:53:52,857][inference][INFO] - + Generation pass throughput: 35.80 (tokens/s)
+ [2023-08-13 18:53:52,857][inference][INFO] - Saving inference results
+ [2023-08-13 18:53:52,864][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,16381.771776,0.0307,65.1,6.05,66.1
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/2/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 18:53:53,348][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 18:53:53,350][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 18:53:53,543][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 18:53:53,543][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 18:53:53,544][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 18:53:53,648][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 18:53:53,687][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 18:53:53,688][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 18:54:05,314][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 18:54:05,316][inference][INFO] - Running inference benchmark
+ [2023-08-13 18:54:13,859][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 18:54:13,897][memory_tracker][INFO] - Peak memory usage: 16381.771776 MB
+ [2023-08-13 18:54:13,897][inference][INFO] - + Forward pass peak memory: 16381.771776 (MB)
+ [2023-08-13 18:54:13,898][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 18:54:14,384][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 18:54:45,661][inference][INFO] - + Forward pass latency: 3.07e-02 (s)
+ [2023-08-13 18:54:45,662][inference][INFO] - + Forward pass throughput: 65.10 (samples/s)
+ [2023-08-13 18:54:45,663][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 18:54:52,508][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 18:55:16,709][inference][INFO] - + Generation pass latency: 6.05e+00 (s)
+ [2023-08-13 18:55:16,711][inference][INFO] - + Generation pass throughput: 66.10 (tokens/s)
+ [2023-08-13 18:55:16,711][inference][INFO] - Saving inference results
+ [2023-08-13 18:55:16,718][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 2
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,30780.817408,0.109,18.3,7.06,56.7
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/3/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 18:55:17,190][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 18:55:17,192][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 18:55:17,378][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 18:55:17,378][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 18:55:17,378][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 18:55:17,482][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 18:55:17,521][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 18:55:17,522][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 18:55:34,983][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 18:55:34,985][inference][INFO] - Running inference benchmark
+ [2023-08-13 18:55:43,574][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 18:55:43,699][memory_tracker][INFO] - Peak memory usage: 30780.817408 MB
+ [2023-08-13 18:55:43,699][inference][INFO] - + Forward pass peak memory: 30780.817408 (MB)
+ [2023-08-13 18:55:43,700][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 18:55:47,515][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 18:56:58,111][inference][INFO] - + Forward pass latency: 1.09e-01 (s)
+ [2023-08-13 18:56:58,112][inference][INFO] - + Forward pass throughput: 18.30 (samples/s)
+ [2023-08-13 18:56:58,112][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 18:57:05,174][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 18:57:26,348][inference][INFO] - + Generation pass latency: 7.06e+00 (s)
+ [2023-08-13 18:57:26,350][inference][INFO] - + Generation pass throughput: 56.70 (tokens/s)
+ [2023-08-13 18:57:26,350][inference][INFO] - Saving inference results
+ [2023-08-13 18:57:26,358][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/4/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/4/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,17002.528768,0.0312,128.0,6.13,131.0
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/4/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 18:57:26,862][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 18:57:26,863][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 18:57:27,052][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 18:57:27,052][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 18:57:27,052][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 18:57:27,160][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 18:57:27,200][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 18:57:27,201][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 18:57:38,567][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 18:57:38,568][inference][INFO] - Running inference benchmark
+ [2023-08-13 18:57:47,091][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 18:57:47,137][memory_tracker][INFO] - Peak memory usage: 17002.528768 MB
+ [2023-08-13 18:57:47,137][inference][INFO] - + Forward pass peak memory: 17002.528768 (MB)
+ [2023-08-13 18:57:47,137][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 18:57:47,895][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 18:58:36,284][inference][INFO] - + Forward pass latency: 3.12e-02 (s)
+ [2023-08-13 18:58:36,285][inference][INFO] - + Forward pass throughput: 128.00 (samples/s)
+ [2023-08-13 18:58:36,286][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 18:58:43,398][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 18:59:07,914][inference][INFO] - + Generation pass latency: 6.13e+00 (s)
+ [2023-08-13 18:59:07,916][inference][INFO] - + Generation pass throughput: 131.00 (tokens/s)
+ [2023-08-13 18:59:07,916][inference][INFO] - Saving inference results
+ [2023-08-13 18:59:07,923][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/5/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 4
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/5/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,31481.266175999997,0.187,21.4,7.69,104.0
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/5/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 18:59:08,410][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 18:59:08,412][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 18:59:08,643][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 18:59:08,643][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 18:59:08,643][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 18:59:08,746][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 18:59:08,786][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 18:59:08,787][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 18:59:26,648][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 18:59:26,650][inference][INFO] - Running inference benchmark
+ [2023-08-13 18:59:35,224][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 18:59:35,437][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB
+ [2023-08-13 18:59:35,437][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB)
+ [2023-08-13 18:59:35,442][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 18:59:42,372][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:00:57,053][inference][INFO] - + Forward pass latency: 1.87e-01 (s)
+ [2023-08-13 19:00:57,055][inference][INFO] - + Forward pass throughput: 21.40 (samples/s)
+ [2023-08-13 19:00:57,056][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:01:04,809][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:01:27,891][inference][INFO] - + Generation pass latency: 7.69e+00 (s)
+ [2023-08-13 19:01:27,893][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s)
+ [2023-08-13 19:01:27,893][inference][INFO] - Saving inference results
+ [2023-08-13 19:01:27,900][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/6/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float16
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/6/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,19498.139648,0.0987,162.0,8.13,394.0
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/6/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:01:28,477][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:01:28,478][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:01:28,675][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:01:28,675][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:01:28,675][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:01:28,778][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:01:28,818][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:01:28,819][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
+ [2023-08-13 19:01:40,467][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:01:40,468][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:01:49,130][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:01:49,242][memory_tracker][INFO] - Peak memory usage: 19498.139648 MB
+ [2023-08-13 19:01:49,242][inference][INFO] - + Forward pass peak memory: 19498.139648 (MB)
+ [2023-08-13 19:01:49,243][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:01:51,911][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:02:45,933][inference][INFO] - + Forward pass latency: 9.87e-02 (s)
+ [2023-08-13 19:02:45,934][inference][INFO] - + Forward pass throughput: 162.00 (samples/s)
+ [2023-08-13 19:02:45,935][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:02:54,299][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:03:18,702][inference][INFO] - + Generation pass latency: 8.13e+00 (s)
+ [2023-08-13 19:03:18,704][inference][INFO] - + Generation pass throughput: 394.00 (tokens/s)
+ [2023-08-13 19:03:18,704][inference][INFO] - Saving inference results
+ [2023-08-13 19:03:18,711][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/7/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: float32
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 20
+   input_shapes:
+     batch_size: 16
+     sequence_length: 200
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+ experiment_name: llama_1gpu_inference
+ model: togethercomputer/LLaMA-2-7B-32K
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/7/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,35824.467968,0.684,23.4,13.0,246.0
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/llama_1gpu_inference/7/main.log ADDED
@@ -0,0 +1,23 @@
+ [2023-08-13 19:03:19,319][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:03:19,320][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:03:19,512][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-08-13 19:03:19,512][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:03:19,513][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:03:19,614][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:03:19,654][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:03:19,655][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
+ [2023-08-13 19:03:38,084][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:03:38,085][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:03:46,788][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:03:47,492][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB
+ [2023-08-13 19:03:47,492][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB)
+ [2023-08-13 19:03:47,508][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:04:12,871][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:05:30,856][inference][INFO] - + Forward pass latency: 6.84e-01 (s)
+ [2023-08-13 19:05:30,858][inference][INFO] - + Forward pass throughput: 23.40 (samples/s)
+ [2023-08-13 19:05:30,858][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:05:44,515][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:06:10,449][inference][INFO] - + Generation pass latency: 1.30e+01 (s)
+ [2023-08-13 19:06:10,451][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s)
+ [2023-08-13 19:06:10,452][inference][INFO] - Saving inference results
+ [2023-08-13 19:06:10,459][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_bert_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_bert_inference
+ model: hf-internal-testing/tiny-random-bert
+ device: cpu
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_bert_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s)
+ 0,460.14464,0.00327,306.0
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_bert_inference/0/main.log ADDED
@@ -0,0 +1,20 @@
+ [2023-08-13 19:06:15,396][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:06:15,398][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:06:15,579][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-08-13 19:06:15,579][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:06:15,579][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:06:15,579][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:06:15,581][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:06:15,581][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-13 19:06:16,189][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:06:16,189][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:06:16,309][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-13 19:06:16,310][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:06:16,361][inference][INFO] - + Forward pass peak memory: 460.14464 (MB)
+ [2023-08-13 19:06:16,362][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
+ [2023-08-13 19:06:16,363][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:06:16,397][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:06:26,504][inference][INFO] - + Forward pass latency: 3.27e-03 (s)
+ [2023-08-13 19:06:26,506][inference][INFO] - + Forward pass throughput: 306.00 (samples/s)
+ [2023-08-13 19:06:26,506][inference][INFO] - Saving inference results
+ [2023-08-13 19:06:26,521][backend][INFO] - Cleaning backend
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_gpt2_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,66 @@
+ backend:
+   name: pytorch
+   version: 2.0.1+cu117
+   _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: true
+   continous_isolation_check: true
+   delete_cache: false
+   no_weights: false
+   torch_dtype: null
+   device_map: null
+   load_in_8bit: false
+   load_in_4bit: false
+   bettertransformer: false
+   torch_compile: false
+   torch_compile_config:
+     fullgraph: false
+     dynamic: false
+     backend: inductor
+     mode: null
+     options: null
+     disable: false
+   amp_autocast: false
+   amp_dtype: null
+   disable_grad: true
+   eval_mode: true
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
+   seed: 42
+   memory: true
+   warmup_runs: 10
+   benchmark_duration: 10
+   input_shapes:
+     batch_size: 1
+     sequence_length: 16
+     num_choices: 4
+     width: 64
+     height: 64
+     num_channels: 3
+     point_batch_size: 3
+     nb_points_per_image: 2
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 100
+ experiment_name: pytorch_gpt2_inference
+ model: hf-internal-testing/tiny-random-gpt2
+ device: cpu
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.11.0
+   transformers_version: 4.32.0.dev0
+   accelerate_version: 0.21.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
+   cpu_count: 96
+   cpu_ram_mb: 1204539.797504
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_gpt2_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ ,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0,463.76959999999997,0.00322,311.0,0.49,204.0
raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5/pytorch_gpt2_inference/0/main.log ADDED
@@ -0,0 +1,22 @@
+ [2023-08-13 19:06:31,060][benchmark][INFO] - Configuring inference benchmark
+ [2023-08-13 19:06:31,062][benchmark][INFO] - + Setting seed(42)
+ [2023-08-13 19:06:31,241][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
+ [2023-08-13 19:06:31,241][backend][INFO] - Configuring pytorch backend
+ [2023-08-13 19:06:31,241][backend][INFO] - + Checking initial device isolation
+ [2023-08-13 19:06:31,241][backend][INFO] - + Checking contineous device isolation
+ [2023-08-13 19:06:31,243][pytorch][INFO] - + Disabling gradients
+ [2023-08-13 19:06:31,243][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
+ [2023-08-13 19:06:31,924][pytorch][INFO] - + Turning on eval mode
+ [2023-08-13 19:06:31,924][inference][INFO] - Running inference benchmark
+ [2023-08-13 19:06:32,134][inference][INFO] - + Tracking forward pass peak memory
+ [2023-08-13 19:06:32,186][inference][INFO] - + Forward pass peak memory: 463.76959999999997 (MB)
+ [2023-08-13 19:06:32,188][inference][INFO] - + Warming up the forward pass
+ [2023-08-13 19:06:32,226][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-08-13 19:06:42,335][inference][INFO] - + Forward pass latency: 3.22e-03 (s)
+ [2023-08-13 19:06:42,338][inference][INFO] - + Forward pass throughput: 311.00 (samples/s)
+ [2023-08-13 19:06:42,339][inference][INFO] - + Warming up the generation pass
+ [2023-08-13 19:06:42,839][inference][INFO] - + Tracking generation latency and throughput
+ [2023-08-13 19:06:53,136][inference][INFO] - + Generation pass latency: 4.90e-01 (s)
+ [2023-08-13 19:06:53,137][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s)
+ [2023-08-13 19:06:53,137][inference][INFO] - Saving inference results
+ [2023-08-13 19:06:53,152][backend][INFO] - Cleaning backend
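For convenience, a closing sketch (again assuming a local checkout with pandas installed; not part of the commit) that gathers every inference_results.csv added here into a single comparison table:

import glob

import pandas as pd

root = "raw_results/2023-08-13_17:26:40_892f9ea0db18d1bef81ea45221f53745c03509f5"

frames = []
for path in sorted(glob.glob(f"{root}/*/*/inference_results.csv")):
    df = pd.read_csv(path, index_col=0)
    # Label rows as "<experiment>/<run_id>", e.g. "llama_1gpu_inference/0".
    df.index = ["/".join(path.split("/")[-3:-1])]
    frames.append(df)

# Columns absent from a run (e.g. no generate.* metrics for the BERT
# text-classification run) show up as NaN after concatenation.
print(pd.concat(frames))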