AppleSwing
committed on
Merge branch 'pr/27' into pr/30
- backend-cli.py +1 -0
- src/backend/hflm_with_measurement.py +73 -3
- src/backend/run_eval_suite.py +8 -0
- src/display/utils.py +4 -0
- src/submission/check_validity.py +1 -1
backend-cli.py CHANGED
@@ -473,6 +473,7 @@ if __name__ == "__main__":
         precisions = args.precision.split(",")
         print(f"debug_model_names: {debug_model_names}, debug_task_name: {debug_task_name}, precisions: {precisions}")
         task_lst = TASKS_HARNESS.copy()
+        RESULTS_REPO = DEBUG_RESULTS_REPO
         for precision in precisions:
             for debug_model_name in debug_model_names:
                 for task in task_lst:
src/backend/hflm_with_measurement.py CHANGED
@@ -5,6 +5,7 @@ import sys
 from time import time
 from pathlib import Path
 from typing import List, Literal, Optional, Tuple, Union
+from calflops import calculate_flops
 
 import torch
 import torch.nn.functional as F
@@ -37,6 +38,9 @@ from lm_eval.models.utils import (
     stop_sequences_criteria,
 )
 from lm_eval.models.huggingface import HFLM
+from src.utils import get_gpu_number, get_gpu_details, get_peak_bw, transfer_precision2bytes, get_peak_flops
+from src.submission.check_validity import get_model_size
+from src.envs import API
 
 
 class StopWatch(TextStreamer):
@@ -67,6 +71,10 @@ class StopWatch(TextStreamer):
 class HFLMWithMeasurement(HFLM):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
+        self.pretrained = kwargs.get("pretrained", None)
+        self.revision = kwargs.get("revision", None)
+        self.precision = kwargs.get("dtype", None)
+        self.total_flops = 0
 
     def _loglikelihood_tokens(
         self,
@@ -306,6 +314,7 @@ class HFLMWithMeasurement(HFLM):
         generation_kwargs.pop("temperature")
 
         generation_kwargs.pop("is_gsm8k")
+        context_length = context.shape[1]
 
         if not is_gsm8k:
             # build stopping criteria
@@ -341,12 +350,71 @@ class HFLMWithMeasurement(HFLM):
 
         batch_size = context.shape[0]
        output_length = stop_watch.decoding_iterations
+
+        precision_bytes = transfer_precision2bytes(self.precision)
+
+        model_info = API.model_info(repo_id=self.pretrained, revision=self.revision)
+        model_size_param = get_model_size(model_info=model_info, precision=self.precision)
+        model_size = model_size_param * precision_bytes
+
+        model_config = self.model.config
+
+        n_layers = model_config.num_hidden_layers if hasattr(model_config, "num_hidden_layers") else model_config.num_layers
+        d_model = model_config.hidden_size if hasattr(model_config, "hidden_size") else model_config.d_model
+
+        if hasattr(model_config, "num_experts_per_tok"):
+            n_experts_per_tok = model_config.num_experts_per_tok
+        elif hasattr(model_config, "num_selected_experts"):
+            n_experts_per_tok = model_config.num_selected_experts
+        else:
+            n_experts_per_tok = 1
+
+        if hasattr(model_config, "ffn_dim"):
+            d_ff = model_config.ffn_dim
+        elif hasattr(model_config, "intermediate_size"):
+            d_ff = model_config.intermediate_size
+        elif hasattr(model_config, "d_ff"):
+            d_ff = model_config.d_ff
+        else:
+            raise ValueError("Unknown ffn dim model configuration")
+
+        if hasattr(model_config, "num_local_experts"):
+            num_experts = model_config.num_local_experts
+        elif hasattr(model_config, "num_experts"):
+            num_experts = model_config.num_experts
+        else:
+            num_experts = 1
+
+        ffn_params = n_layers * d_ff * 2 * d_model
+
+        shared_params = model_size_param * 1e9 - num_experts * ffn_params
+
+        model_size = shared_params + n_experts_per_tok * ffn_params
+
+        per_token_kv_size = 2 * n_layers * d_model * precision_bytes
+
+        peak_bw_single = get_peak_bw(get_gpu_details())
+        peak_bw = peak_bw_single * get_gpu_number()
+
+        kv_size = (output_length - 1) * per_token_kv_size / 1e9
 
         end_to_end_time = (end - start) / batch_size
         prefilling_time = stop_watch.prefilling_time / batch_size
         decoding_time = stop_watch.decoding_time / batch_size
         token_per_sec = output_length / decoding_time
-        return res, end_to_end_time, prefilling_time, token_per_sec
+        ach_mem_bw = (model_size / 1e9 + kv_size) * token_per_sec
+
+        flops_per_token = 2 * model_size + 2 * n_layers * context_length * d_model
+        peak_flops_single = get_peak_flops(get_gpu_details(), self.precision)
+        peak_flops = peak_flops_single * get_gpu_number()
+
+        ## TODO only support llama-type decoder only models and moe models of switch transformer and mixtrial
+        mfu = token_per_sec * flops_per_token / peak_flops
+        mbu = ach_mem_bw / peak_bw
+
+        # print(f"mfu: {mfu}, mbu: {mbu}")
+
+        return res, end_to_end_time, prefilling_time, token_per_sec, mfu, mbu
 
     def generate_until(
         self, requests: List[Instance], disable_tqdm: bool = False
@@ -461,7 +529,7 @@ class HFLMWithMeasurement(HFLM):
             kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
 
             # perform batched generation
-            cont, end_to_end_time, prefilling_time, token_per_sec = self._model_generate(
+            cont, end_to_end_time, prefilling_time, token_per_sec, mfu, mbu = self._model_generate(
                 context=context_enc,
                 attention_mask=attn_masks,
                 stop=until,
@@ -476,6 +544,8 @@ class HFLMWithMeasurement(HFLM):
                 cont_toks = cont_toks[context_enc.shape[1] :]
 
                 s = self.tok_decode(cont_toks)
+
+                # print(s)
 
                 # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
                 if not is_gsm8k:
@@ -485,7 +555,7 @@ class HFLMWithMeasurement(HFLM):
                         # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
                         s = s.split(term)[0]
 
-                res.append((s, end_to_end_time, prefilling_time, token_per_sec))
+                res.append((s, end_to_end_time, prefilling_time, token_per_sec, mfu, mbu))
 
                 self.cache_hook.add_partial("generate_until", (context, gen_kwargs), s)
                 pbar.update(1)
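For orientation, the new bookkeeping in _model_generate reduces to a few closed-form estimates: FLOPs per generated token from the active parameter count plus an attention term, achieved memory bandwidth from the weight and KV-cache bytes read per token, and MFU/MBU as ratios against the GPU peaks. Below is a minimal standalone sketch of that arithmetic with the same variable names as the diff; every concrete value (7B dense model, fp16, prompt/output lengths, GPU peaks, measured throughput) is an illustrative assumption, not something read from the diff.

# Minimal sketch of the MFU/MBU arithmetic added in _model_generate above.
# All concrete numbers here are illustrative assumptions for the example.
precision_bytes = 2                          # assumed fp16
model_size_param = 6.74                      # assumed params in billions (7B-class dense model)
n_layers, d_model, d_ff = 32, 4096, 11008    # assumed Llama-7B-style config
num_experts, n_experts_per_tok = 1, 1        # dense model
context_length, output_length = 512, 200     # assumed prompt / generation lengths
token_per_sec = 50.0                         # assumed measured decoding throughput
peak_flops = 312e12                          # assumed single-GPU fp16 peak, FLOP/s
peak_bw = 2039.0                             # assumed single-GPU HBM peak, GB/s

ffn_params = n_layers * d_ff * 2 * d_model
shared_params = model_size_param * 1e9 - num_experts * ffn_params
model_size = shared_params + n_experts_per_tok * ffn_params   # params active per token

per_token_kv_size = 2 * n_layers * d_model * precision_bytes
kv_size = (output_length - 1) * per_token_kv_size / 1e9

ach_mem_bw = (model_size / 1e9 + kv_size) * token_per_sec
flops_per_token = 2 * model_size + 2 * n_layers * context_length * d_model

mfu = token_per_sec * flops_per_token / peak_flops
mbu = ach_mem_bw / peak_bw
print(f"MFU: {mfu * 100:.2f}%, MBU: {mbu * 100:.2f}%")

For a dense checkpoint num_experts and n_experts_per_tok are both 1, so model_size collapses back to the full parameter count; for an MoE checkpoint only the routed experts' FFN parameters count toward the per-token FLOPs and weight traffic, which is what the shared_params / ffn_params split above captures.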
src/backend/run_eval_suite.py CHANGED
@@ -17,12 +17,16 @@ def process_results_decorator(func):
         end_to_end_time = sum([r[1] for r in results]) / len(results)
         prefilling_time = sum([r[2] for r in results]) / len(results)
         decoding_throughput = sum([r[3] for r in results]) / len(results)
+        mfu = sum([r[4] for r in results]) / len(results)
+        mbu = sum([r[5] for r in results]) / len(results)
         # print(f"end_to_end_time: {end_to_end_time}, prefilling_time: {prefilling_time}, decoding_throughput: {decoding_throughput}")
 
         result_dict = func(self, doc, processed_results, *args, **kwargs)
         result_dict["end_to_end_time"] = end_to_end_time
         result_dict["prefilling_time"] = prefilling_time
         result_dict["decoding_throughput"] = decoding_throughput
+        result_dict["mfu"] = mfu * 100
+        result_dict["mbu"] = mbu * 100
         return result_dict
     return wrapper
 ConfigurableTask.process_results = process_results_decorator(orig_process_results)
@@ -33,6 +37,8 @@ def aggregation_decorator(func):
         aggregation_list["end_to_end_time"] = mean
         aggregation_list["prefilling_time"] = mean
         aggregation_list["decoding_throughput"] = mean
+        aggregation_list["mfu"] = mean
+        aggregation_list["mbu"] = mean
         return aggregation_list
     return wrapper
 ConfigurableTask.aggregation = aggregation_decorator(orig_aggregation)
@@ -43,6 +49,8 @@ def higher_is_better_decorator(func):
         higher_is_better_dict["end_to_end_time"] = False
         higher_is_better_dict["prefilling_time"] = False
         higher_is_better_dict["decoding_throughput"] = True
+        higher_is_better_dict["mfu"] = True
+        higher_is_better_dict["mbu"] = True
         return higher_is_better_dict
     return wrapper
 ConfigurableTask.higher_is_better = higher_is_better_decorator(orig_higher_is_better)
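The run_eval_suite.py changes follow the file's existing monkey-patching pattern: each per-request tuple returned by generate_until now carries mfu and mbu at indices 4 and 5, the patched process_results averages them across the requests for a document and reports them as percentages, and the aggregation / higher_is_better patches register mean aggregation with higher-is-better semantics. A simplified, self-contained sketch of that wrapper (names trimmed down; not lm-eval-harness's actual ConfigurableTask API):

# Simplified sketch of the decorator pattern used above; the tuple layout
# (text, e2e_time, prefill_time, tok_per_sec, mfu, mbu) mirrors the diff,
# while the surrounding function names are placeholders.
def process_results_decorator(func):
    def wrapper(self, doc, results, *args, **kwargs):
        processed_results = [r[0] for r in results]       # the generated strings
        mfu = sum(r[4] for r in results) / len(results)   # mean model FLOPs utilization
        mbu = sum(r[5] for r in results) / len(results)   # mean memory bandwidth utilization
        result_dict = func(self, doc, processed_results, *args, **kwargs)
        result_dict["mfu"] = mfu * 100                    # report as percentages
        result_dict["mbu"] = mbu * 100
        return result_dict
    return wrapper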
src/display/utils.py CHANGED
@@ -18,12 +18,16 @@ GPU_Power = 'Power(W)'
 GPU_Mem = 'Mem(G)'
 GPU_Name = "GPU"
 GPU_Util = 'Util(%)'
+MFU = 'MFU(%)'
+MBU = 'MBU(%)'
 BATCH_SIZE = 'bs'
 PRECISION = "Precision"
 system_metrics_to_name_map = {
     "end_to_end_time": f"{E2Es}",
     "prefilling_time": f"{PREs}",
     "decoding_throughput": f"{TS}",
+    "mfu": f"{MFU}",
+    "mbu": f"{MBU}"
 }
 
 gpu_metrics_to_name_map = {
src/submission/check_validity.py CHANGED
@@ -74,7 +74,7 @@ def is_model_on_hub(
 
 
 def get_model_size(model_info: ModelInfo, precision: str):
-    size_pattern =
+    size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
     try:
         model_size = round(model_info.safetensors["total"] / 1e9, 3)
     except (AttributeError, TypeError):
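The check_validity.py change restores the size_pattern regex, which picks out a parameter-count token such as "7b", "350m", or "1.3b" embedded in a repo name; get_model_size presumably falls back to it when safetensors metadata is unavailable (that fallback path lies outside the hunk). An illustrative check of what the pattern matches, using made-up model ids:

import re

# Same pattern as in the diff; the repo ids below are illustrative examples.
size_pattern = re.compile(r"(\d\.)?\d+(b|m)")

for repo_id in ["meta-llama/Llama-2-7b-hf", "facebook/opt-350m", "facebook/opt-1.3b"]:
    match = size_pattern.search(repo_id.lower())
    print(repo_id, "->", match.group(0) if match else None)
# prints "7b", "350m", "1.3b" respectively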