import math


# Helper function to pretty-print parameter counts (e.g. "1.3 B", "175.0 B").
def convert_params(params):
    if params == 0:
        return "0"
    size_name = ("", "K", "M", "B", "T", "P", "E", "Z", "Y")
    i = int(math.floor(math.log(params, 1000)))
    p = math.pow(1000, i)
    s = round(params / p, 2)
    return "%s %s" % (s, size_name[i])


# Parameter calculation function. `topk` is accepted for interface
# completeness but does not affect the count: top-k routing changes which
# experts are active per token, not how many parameters exist.
def calc_params(vocab_size, tied_embeddings, hidden_size, sequence_length,
                num_layers, moe, num_experts, expert_interval, topk,
                ffn_expansion_factor, num_mlp_linears, kv_size_ratio):
    # Tied embeddings share one matrix between the input embedding and the
    # output projection; untied models store both.
    if tied_embeddings:
        embedding_params = hidden_size * vocab_size
    else:
        embedding_params = 2 * hidden_size * vocab_size
    # Learned (absolute) position embeddings.
    position_embedding_params = hidden_size * sequence_length
    # Q and output projections contribute 2 * h^2 per layer; K and V are
    # scaled by kv_size_ratio (a ratio below 1 corresponds to
    # grouped-/multi-query attention with smaller K/V projections).
    attention_params = int(2 * (1 + kv_size_ratio) * num_layers * hidden_size * hidden_size)
    # LayerNorm parameters across the network (fixed per-layer factor).
    layernorm_params = 13 * num_layers * hidden_size
    if moe:
        # Integer division: an expert layer occurs every `expert_interval` layers.
        num_expert_layers = num_layers // expert_interval
        ffn_expert_params = num_mlp_linears * ffn_expansion_factor * num_expert_layers * num_experts * hidden_size * hidden_size
        ffn_dense_params = num_mlp_linears * ffn_expansion_factor * (num_layers - num_expert_layers) * hidden_size * hidden_size
        ffn_params = ffn_expert_params + ffn_dense_params
        # Router/gating network: a linear map from hidden_size to num_experts
        # in each expert layer.
        gating_params = num_expert_layers * hidden_size * num_experts
    else:
        ffn_params = num_mlp_linears * ffn_expansion_factor * num_layers * hidden_size * hidden_size

    total_params = (embedding_params + attention_params + ffn_params
                    + position_embedding_params + layernorm_params)
    if moe:
        total_params += gating_params

    return f"""
Embedding parameters: {convert_params(embedding_params)}
Attention parameters: {convert_params(attention_params)}
FFN parameters: {convert_params(ffn_params)}
{'Gating parameters: ' + convert_params(gating_params) if moe else ''}
Total Params in the Model: {convert_params(total_params)}
"""
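

# A quick, hypothetical sanity check (not from the source): the values below
# are illustrative assumptions, roughly shaped like a GPT-3-175B dense model
# with tied embeddings, standard attention (kv_size_ratio=1.0), and a 4x MLP
# with two linear layers. The printed total should land around 175 B.
if __name__ == "__main__":
    print(calc_params(
        vocab_size=50257,
        tied_embeddings=True,
        hidden_size=12288,
        sequence_length=2048,
        num_layers=96,
        moe=False,
        num_experts=0,
        expert_interval=1,
        topk=1,
        ffn_expansion_factor=4,
        num_mlp_linears=2,
        kv_size_ratio=1.0,
    ))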