|
# File: transformers-bloom-inference-main/bloom-inference-scripts/bloom-accelerate-inference.py |
|
import argparse |
|
import gc |
|
import math |
|
import os |
|
import time |
|
import torch |
|
import torch.distributed as dist |
|
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer |
|
|
|
def get_args(): |
|
parser = argparse.ArgumentParser() |
|
parser.add_argument('--local_rank', required=False, type=int, help='used by dist launchers') |
|
parser.add_argument('--name', type=str, help='Name path', required=True) |
|
parser.add_argument('--batch_size', default=1, type=int, help='batch size') |
|
parser.add_argument('--benchmark', action='store_true', help='additionally run benchmark') |
|
parser.add_argument('--greedy', action='store_true') |
|
parser.add_argument('--top-k', type=int, default=0) |
|
parser.add_argument('--top-p', type=float, default=0.0) |
|
parser.add_argument('--dtype', type=str, help='float16 or int8', choices=['int8', 'float16'], default='float16') |
|
return parser.parse_args() |
|
t_start = time.time() |
|
num_tokens = 100 |
|
args = get_args() |
|
local_rank = int(os.getenv('LOCAL_RANK', '0')) |
|
world_size = torch.cuda.device_count() |
|
rank = local_rank |
|
|
|
def print_rank0(*msg): |
|
if rank != 0: |
|
return |
|
print(*msg) |
|
print_rank0(f'Using {world_size} gpus') |
|
model_name = args.name |
|
print_rank0(f'Loading model {model_name}') |
|
tokenizer = AutoTokenizer.from_pretrained(model_name) |
|
dtype = torch.bfloat16 if model_name in ['bigscience/bloom', 'bigscience/bigscience-small-testing'] else torch.float16 |
|
infer_dtype = args.dtype |
|
if infer_dtype == 'int8': |
|
dtype = torch.int8 |
|
kwargs = dict(device_map='auto') |
|
|
|
def get_world_size() -> int: |
|
if dist.is_initialized(): |
|
return dist.get_world_size() |
|
else: |
|
return 1 |
|
if get_world_size() > 1: |
|
kwargs['device_map'] = 'balanced_low_0' |
|
if infer_dtype == 'int8': |
|
print_rank0('Using `load_in_8bit=True` to use quantized model')
|
kwargs['load_in_8bit'] = True |
|
else: |
|
kwargs['torch_dtype'] = dtype |
|
model = AutoModelForCausalLM.from_pretrained(model_name, **kwargs) |
|
if args.benchmark: |
|
t_ready = time.time() |
|
print_rank0(f'*** Starting to generate {num_tokens} tokens with bs={args.batch_size}') |
|
input_sentences = [
    'DeepSpeed is a machine learning framework',
    'He is working on',
    'He has a',
    'He got all',
    'Everyone is happy and I can',
    'The new movie that got Oscar this year',
    'In the far far distance from our galaxy,',
    'Peace is the only way',
]
|
if args.batch_size > len(input_sentences): |
|
input_sentences *= math.ceil(args.batch_size / len(input_sentences)) |
|
generate_kwargs = dict(max_new_tokens=num_tokens, do_sample=False) |
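
# Note: the --greedy / --top-k / --top-p flags parsed in get_args() above are never
# applied; decoding below is always greedy (do_sample=False). A minimal sketch of
# honoring them (an assumption, not part of the original script) could be:
#
#     if not args.greedy and (args.top_k > 0 or args.top_p > 0.0):
#         generate_kwargs.update(do_sample=True)
#         if args.top_k > 0:
#             generate_kwargs['top_k'] = args.top_k
#         if args.top_p > 0.0:
#             generate_kwargs['top_p'] = args.top_p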
|
print_rank0(f'Generate args {generate_kwargs}') |
|
inputs = input_sentences[:args.batch_size] |
|
|
|
def generate(): |
|
input_tokens = tokenizer.batch_encode_plus(inputs, return_tensors='pt', padding=True) |
|
for t in input_tokens: |
|
if torch.is_tensor(input_tokens[t]): |
|
input_tokens[t] = input_tokens[t].to('cuda:0') |
|
outputs = model.generate(**input_tokens, **generate_kwargs) |
|
input_tokens_lengths = [x.shape[0] for x in input_tokens.input_ids] |
|
output_tokens_lengths = [x.shape[0] for x in outputs] |
|
total_new_tokens = [o - i for (i, o) in zip(input_tokens_lengths, output_tokens_lengths)] |
|
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) |
|
return zip(inputs, outputs, total_new_tokens) |
|
print_rank0('*** Running generate') |
|
t_generate_start = time.time() |
|
generated = generate() |
|
t_generate_span = time.time() - t_generate_start |
|
for (i, o, _) in generated: |
|
print_rank0(f"{'-' * 60}\nin={i}\nout={o}\n") |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
print_rank0('*** Running benchmark') |
|
for i in range(1):  # warm up once before the timed loop
|
_ = generate() |
|
torch.cuda.synchronize() |
|
t0 = time.time() |
|
cycles = 5 |
|
total_new_tokens_generated = 0 |
|
for i in range(cycles): |
|
generated = generate() |
|
total_new_tokens_generated += sum((new_tokens for (_, _, new_tokens) in generated)) |
|
torch.cuda.synchronize() |
|
throughput = (time.time() - t0) / total_new_tokens_generated |
|
print_rank0(
    '\n*** Performance stats:\n'
    f'Throughput per token including tokenize: {throughput * 1000:.2f} msecs\n'
    f'Start to ready to generate: {t_ready - t_start:.3f} secs\n'
    f'Tokenize and generate {total_new_tokens_generated} (bs={args.batch_size}) tokens: {t_generate_span:.3f} secs\n'
    f'Start to finish: {t_ready - t_start + t_generate_span:.3f} secs\n'
)
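
# Illustrative launch commands for this script (a sketch based on the repository's
# README; exact paths, GPU counts and model names are assumptions):
#
#     python bloom-inference-scripts/bloom-accelerate-inference.py --name bigscience/bloom --batch_size 1 --benchmark
#     python bloom-inference-scripts/bloom-accelerate-inference.py --name bigscience/bloom --dtype int8 --batch_size 1 --benchmark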
|
|
|
# File: transformers-bloom-inference-main/bloom-inference-scripts/bloom-ds-inference.py |
|
import gc |
|
import io |
|
import json |
|
import math |
|
import os |
|
import time |
|
from argparse import ArgumentParser |
|
from pathlib import Path |
|
import torch |
|
import torch.distributed as dist |
|
import deepspeed |
|
from huggingface_hub import snapshot_download |
|
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer |
|
from transformers.models.bloom.modeling_bloom import BloomBlock as BloomBlock |
|
from transformers.utils import is_offline_mode |
|
tp_presharded_models = ['microsoft/bloom-deepspeed-inference-int8', 'microsoft/bloom-deepspeed-inference-fp16'] |
|
t_start = time.time() |
|
num_tokens = 100 |
|
parser = ArgumentParser() |
|
parser.add_argument('--name', required=True, type=str, help='model_name') |
|
parser.add_argument('--dtype', type=str, help='float16 or int8', choices=['int8', 'float16'], default='float16') |
|
parser.add_argument('--local_rank', required=False, type=int, help='used by dist launchers') |
|
parser.add_argument('--batch_size', default=1, type=int, help='batch size') |
|
parser.add_argument('--benchmark', action='store_true', help='additionally run benchmark') |
|
args = parser.parse_args() |
|
local_rank = int(os.getenv('LOCAL_RANK', '0')) |
|
world_size = int(os.getenv('WORLD_SIZE', '1')) |
|
deepspeed.init_distributed('nccl') |
|
rank = dist.get_rank() |
|
|
|
def print_rank0(*msg): |
|
if rank != 0: |
|
return |
|
print(*msg) |
|
|
|
def get_repo_root(model_name_or_path): |
|
if is_offline_mode(): |
|
print_rank0('Offline mode: forcing local_files_only=True') |
|
if rank == 0: |
|
snapshot_download(model_name_or_path, local_files_only=is_offline_mode(), cache_dir=os.getenv('TRANSFORMERS_CACHE', None), ignore_patterns=['*.safetensors']) |
|
dist.barrier() |
|
return snapshot_download(model_name_or_path, local_files_only=is_offline_mode(), cache_dir=os.getenv('TRANSFORMERS_CACHE', None), ignore_patterns=['*.safetensors']) |
|
|
|
def get_checkpoint_files(model_name_or_path): |
|
cached_repo_dir = get_repo_root(model_name_or_path) |
|
file_list = [str(entry) for entry in Path(cached_repo_dir).rglob('*.[bp][it][n]') if entry.is_file()] |
|
return file_list |
|
model_name = args.name |
|
infer_dtype = args.dtype |
|
tp_presharded_mode = model_name in tp_presharded_models
|
print_rank0(f'*** Loading the model {model_name}') |
|
tokenizer = AutoTokenizer.from_pretrained(model_name) |
|
config = AutoConfig.from_pretrained(model_name) |
|
kernel_inject = True |
|
if kernel_inject: |
|
dtype = torch.float16 |
|
else: |
|
dtype = torch.bfloat16 |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
deepspeed.runtime.utils.see_memory_usage('pre-from-pretrained', force=True) |
|
with deepspeed.OnDevice(dtype=dtype, device='meta'): |
|
model = AutoModelForCausalLM.from_config(config, torch_dtype=torch.bfloat16) |
|
if args.benchmark: |
|
deepspeed.runtime.utils.see_memory_usage('post-from-pretrained', force=True) |
|
model = model.eval() |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
deepspeed.runtime.utils.see_memory_usage('post-init-ds-zero-init', force=True) |
|
checkpoints_json = 'checkpoints.json' |
|
|
|
def write_checkpoints_json(): |
|
checkpoint_files = get_checkpoint_files(model_name) |
|
if rank == 0: |
|
data = {'type': 'BLOOM', 'checkpoints': checkpoint_files, 'version': 1.0} |
|
json.dump(data, open(checkpoints_json, 'w')) |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
deepspeed.runtime.utils.see_memory_usage('pre-ds-inference-init', force=True) |
|
if kernel_inject: |
|
kwargs = dict(replace_with_kernel_inject=True) |
|
else: |
|
kwargs = dict(injection_policy={BloomBlock: ('self_attention.dense', 'mlp.dense_4h_to_h')}) |
|
repo_root = get_repo_root(model_name) |
|
if tp_presharded_mode: |
|
checkpoints_json = os.path.join(repo_root, 'ds_inference_config.json') |
|
else: |
|
write_checkpoints_json() |
|
dist.barrier() |
|
model = deepspeed.init_inference(model, mp_size=world_size, base_dir=repo_root, dtype=getattr(torch, infer_dtype), checkpoint=checkpoints_json, **kwargs) |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
deepspeed.runtime.utils.see_memory_usage('post-ds-inference-init', force=True) |
|
model = model.module |
|
if args.benchmark: |
|
t_ready = time.time() |
|
print_rank0(f'*** Starting to generate {num_tokens} tokens with bs={args.batch_size}') |
|
input_sentences = [
    'DeepSpeed is a machine learning framework',
    'He is working on',
    'He has a',
    'He got all',
    'Everyone is happy and I can',
    'The new movie that got Oscar this year',
    'In the far far distance from our galaxy,',
    'Peace is the only way',
]
|
if args.batch_size > len(input_sentences): |
|
input_sentences *= math.ceil(args.batch_size / len(input_sentences)) |
|
generate_kwargs = dict(max_new_tokens=num_tokens, do_sample=False) |
|
print_rank0(f'Generate args {generate_kwargs}') |
|
inputs = input_sentences[:args.batch_size] |
|
|
|
def generate(): |
|
input_tokens = tokenizer.batch_encode_plus(inputs, return_tensors='pt', padding=True) |
|
for t in input_tokens: |
|
if torch.is_tensor(input_tokens[t]): |
|
input_tokens[t] = input_tokens[t].to(torch.cuda.current_device()) |
|
outputs = model.generate(**input_tokens, **generate_kwargs) |
|
input_tokens_lengths = [x.shape[0] for x in input_tokens.input_ids] |
|
output_tokens_lengths = [x.shape[0] for x in outputs] |
|
total_new_tokens = [o - i for (i, o) in zip(input_tokens_lengths, output_tokens_lengths)] |
|
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) |
|
return zip(inputs, outputs, total_new_tokens) |
|
print_rank0('*** Running generate warmup') |
|
_ = generate() |
|
print_rank0('*** Running generate') |
|
t_generate_start = time.time() |
|
generated = generate() |
|
t_generate_span = time.time() - t_generate_start |
|
for (i, o, _) in generated: |
|
print_rank0(f"{'-' * 60}\nin={i}\nout={o}\n") |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
deepspeed.runtime.utils.see_memory_usage('end-of-run', force=True) |
|
if args.benchmark: |
|
print_rank0('*** Running benchmark') |
|
for i in range(1):  # warm up once before the timed loop
|
_ = generate() |
|
torch.cuda.synchronize() |
|
t0 = time.time() |
|
cycles = 5 |
|
total_new_tokens_generated = 0 |
|
for i in range(cycles): |
|
generated = generate() |
|
total_new_tokens_generated += sum((new_tokens for (_, _, new_tokens) in generated)) |
|
torch.cuda.synchronize() |
|
throughput = (time.time() - t0) / total_new_tokens_generated |
|
print_rank0(
    '\n*** Performance stats:\n'
    f'Throughput per token including tokenize: {throughput * 1000:.2f} msecs\n'
    f'Start to ready to generate: {t_ready - t_start:.3f} secs\n'
    f'Tokenize and generate {total_new_tokens_generated} (bs={args.batch_size}) tokens: {t_generate_span:.3f} secs\n'
    f'Start to finish: {t_ready - t_start + t_generate_span:.3f} secs\n'
)
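
# Illustrative launch commands for this script (a sketch; it expects the `deepspeed`
# launcher so that WORLD_SIZE/LOCAL_RANK are set, and the GPU count shown is an
# assumption):
#
#     deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-inference.py --name bigscience/bloom --benchmark
#     deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-inference.py --name microsoft/bloom-deepspeed-inference-int8 --dtype int8 --benchmark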
|
|
|
# File: transformers-bloom-inference-main/bloom-inference-scripts/bloom-ds-zero-inference.py |
|
import gc |
|
import math |
|
import os |
|
import time |
|
from argparse import ArgumentParser |
|
import torch |
|
import torch.distributed as dist |
|
import deepspeed |
|
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer |
|
from transformers.deepspeed import HfDeepSpeedConfig |
|
from transformers.models.bloom.modeling_bloom import BloomBlock as BloomBlock |
|
t_start = time.time() |
|
num_tokens = 100 |
|
parser = ArgumentParser() |
|
parser.add_argument('--name', required=True, type=str, help='model_name') |
|
parser.add_argument('--local_rank', required=False, type=int, help='used by dist launchers') |
|
parser.add_argument('--batch_size', default=1, type=int, help='batch size') |
|
parser.add_argument('--benchmark', action='store_true', help='additionally run benchmark') |
|
parser.add_argument('--cpu_offload', action='store_true', help='whether to activate CPU offload') |
|
parser.add_argument('--nvme_offload_path', help='whether to activate NVME offload and the path on nvme') |
|
args = parser.parse_args() |
|
local_rank = int(os.getenv('LOCAL_RANK', '0')) |
|
world_size = int(os.getenv('WORLD_SIZE', '1')) |
|
deepspeed.init_distributed('nccl') |
|
rank = dist.get_rank() |
|
|
|
def print_rank0(*msg): |
|
if rank != 0: |
|
return |
|
print(*msg) |
|
model_name = args.name |
|
print_rank0(f'*** Loading the model {model_name}') |
|
tokenizer = AutoTokenizer.from_pretrained(model_name) |
|
config = AutoConfig.from_pretrained(model_name) |
|
dtype = torch.bfloat16 if model_name in ['bigscience/bloom', 'bigscience/bigscience-small-testing'] else torch.float16 |
|
model_hidden_size = config.hidden_size |
|
train_batch_size = 1 * world_size |
|
ds_config = {
    'fp16': {'enabled': dtype == torch.float16},
    'bf16': {'enabled': dtype == torch.bfloat16},
    'zero_optimization': {
        'stage': 3,
        'overlap_comm': True,
        'contiguous_gradients': True,
        'reduce_bucket_size': model_hidden_size * model_hidden_size,
        'stage3_prefetch_bucket_size': 0.9 * model_hidden_size * model_hidden_size,
        'stage3_param_persistence_threshold': 0,
    },
    'steps_per_print': 2000,
    'train_batch_size': train_batch_size,
    'train_micro_batch_size_per_gpu': 1,
    'wall_clock_breakdown': False,
}
|
if args.cpu_offload and args.nvme_offload_path: |
|
raise ValueError('Use one of --cpu_offload or --nvme_offload_path and not both') |
|
if args.cpu_offload: |
|
ds_config['zero_optimization']['offload_param'] = dict(device='cpu', pin_memory=True) |
|
if args.nvme_offload_path: |
|
ds_config['zero_optimization']['offload_param'] = dict(device='nvme', pin_memory=True, nvme_path=args.nvme_offload_path, buffer_size=4000000000.0) |
|
dschf = HfDeepSpeedConfig(ds_config) |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
deepspeed.runtime.utils.see_memory_usage('pre-from-pretrained', force=True) |
|
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16) |
|
if args.benchmark: |
|
deepspeed.runtime.utils.see_memory_usage('post-from-pretrained', force=True) |
|
model = model.eval() |
|
print_rank0(ds_config) |
|
ds_engine = deepspeed.initialize(model=model, config_params=ds_config)[0] |
|
ds_engine.module.eval() |
|
model = ds_engine.module |
|
if args.benchmark: |
|
t_ready = time.time() |
|
deepspeed.runtime.utils.see_memory_usage('start-of-generate', force=True) |
|
print_rank0(f'*** Starting to generate {num_tokens} tokens with bs={args.batch_size}') |
|
input_sentences = [
    'DeepSpeed is a machine learning framework',
    'He is working on',
    'He has a',
    'He got all',
    'Everyone is happy and I can',
    'The new movie that got Oscar this year',
    'In the far far distance from our galaxy,',
    'Peace is the only way',
]
|
if args.batch_size > len(input_sentences): |
|
input_sentences *= math.ceil(args.batch_size / len(input_sentences)) |
|
generate_kwargs = dict(max_new_tokens=num_tokens, do_sample=False) |
|
print_rank0(f'Generate args {generate_kwargs}') |
|
inputs = input_sentences[:args.batch_size] |
|
|
|
def generate(): |
|
input_tokens = tokenizer.batch_encode_plus(inputs, return_tensors='pt', padding=True) |
|
for t in input_tokens: |
|
if torch.is_tensor(input_tokens[t]): |
|
input_tokens[t] = input_tokens[t].to(torch.cuda.current_device()) |
|
outputs = model.generate(**input_tokens, **generate_kwargs) |
|
input_tokens_lengths = [x.shape[0] for x in input_tokens.input_ids] |
|
output_tokens_lengths = [x.shape[0] for x in outputs] |
|
total_new_tokens = [o - i for (i, o) in zip(input_tokens_lengths, output_tokens_lengths)] |
|
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) |
|
return zip(inputs, outputs, total_new_tokens) |
|
print_rank0('*** Running generate') |
|
t_generate_start = time.time() |
|
pairs = generate() |
|
t_generate_span = time.time() - t_generate_start |
|
for (i, o, _) in pairs: |
|
print_rank0(f"{'-' * 60}\nin={i}\nout={o}\n") |
|
if args.benchmark: |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
deepspeed.runtime.utils.see_memory_usage('end-of-generate', force=True) |
|
print_rank0('*** Running benchmark') |
|
for i in range(1):  # warm up once before the timed loop
|
_ = generate() |
|
torch.cuda.synchronize() |
|
t0 = time.time() |
|
cycles = 5 |
|
total_new_tokens_generated = 0 |
|
for i in range(cycles): |
|
generated = generate() |
|
total_new_tokens_generated += sum((new_tokens for (_, _, new_tokens) in generated)) |
|
torch.cuda.synchronize() |
|
total_new_tokens_generated *= world_size |
|
throughput = (time.time() - t0) / total_new_tokens_generated |
|
print_rank0(
    '\n*** Performance stats:\n'
    f'Throughput per token including tokenize: {throughput * 1000:.2f} msecs\n'
    f'Start to ready to generate: {t_ready - t_start:.3f} secs\n'
    f'Tokenize and generate {total_new_tokens_generated} (bs={args.batch_size}) tokens: {t_generate_span:.3f} secs\n'
    f'Start to finish: {t_ready - t_start + t_generate_span:.3f} secs\n'
)
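
# Illustrative launch commands for this script (a sketch; GPU count and offload
# paths are assumptions):
#
#     deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-zero-inference.py --name bigscience/bloom --benchmark
#     deepspeed --num_gpus 8 bloom-inference-scripts/bloom-ds-zero-inference.py --name bigscience/bloom --cpu_offload --benchmark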
|
|
|
# File: transformers-bloom-inference-main/inference_server/benchmark.py |
|
import argparse |
|
import gc |
|
from functools import partial |
|
import torch |
|
from .constants import DS_INFERENCE, DS_ZERO |
|
from .model_handler.deployment import ModelDeployment |
|
from .models import start_inference_engine |
|
from .utils import GenerateRequest, create_generate_request, get_argument_parser, get_dummy_batch, get_world_size, parse_args, print_rank_0, run_and_log_time |
|
|
|
def benchmark_generation(model: ModelDeployment, request: GenerateRequest, cycles: int=5): |
|
total_new_tokens_generated = 0 |
|
for _ in range(cycles): |
|
response = model.generate(request=request) |
|
total_new_tokens_generated += sum(response.num_generated_tokens)
|
return total_new_tokens_generated |
|
|
|
def get_benchmark_results(benchmark_time: float, initialization_time: float, total_new_tokens_generated: int, batch_size: int, cycles: int) -> str: |
|
throughput = total_new_tokens_generated / benchmark_time |
|
latency = benchmark_time / cycles |
|
return (
    '\n*** Performance stats:\n'
    f'Throughput (including tokenization) = {throughput:.2f} tokens/sec\n'
    f'Throughput (including tokenization) = {1000 / throughput:.2f} msecs/token\n'
    f'Model loading time = {initialization_time:.2f} secs\n'
    f'Total tokens generated = {total_new_tokens_generated} with batch size = {batch_size}\n'
    f'Latency = {latency:.2f} secs\n'
    f'Model loading time + generation time per batch = {initialization_time + latency:.2f} secs\n'
)
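
# Worked example of the arithmetic above (numbers are assumptions, not measurements):
# with batch_size=8, max_new_tokens=100 and cycles=5, roughly 4000 tokens are generated;
# if that takes 20 s, throughput = 4000 / 20 = 200 tokens/sec, i.e. 1000 / 200 = 5 msecs/token,
# and latency = 20 / 5 = 4 secs per batch.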
|
|
|
def benchmark_end_to_end(args: argparse.Namespace) -> None: |
|
(model, initialization_time) = run_and_log_time(partial(ModelDeployment, args=args, grpc_allowed=False)) |
|
request = create_generate_request(get_dummy_batch(args.batch_size), args.generate_kwargs) |
|
print_rank_0(f'generate_kwargs = {args.generate_kwargs}') |
|
print_rank_0(f'batch_size = {args.batch_size}') |
|
response = model.generate(request=request) |
|
for (i, (o, _)) in zip(request.text, zip(response.text, response.num_generated_tokens)): |
|
print_rank_0(f"{'-' * 60}\nin = {i}\nout = {o}\n") |
|
if args.benchmark_cycles > 0: |
|
print_rank_0('*** Running benchmark') |
|
torch.cuda.empty_cache() |
|
gc.collect() |
|
model.generate(request=request) |
|
torch.cuda.synchronize() |
|
(total_new_tokens_generated, benchmark_time) = run_and_log_time(partial(benchmark_generation, model=model, request=request, cycles=args.benchmark_cycles)) |
|
if args.deployment_framework == DS_ZERO: |
|
total_new_tokens_generated *= get_world_size() |
|
print_rank_0(get_benchmark_results(benchmark_time, initialization_time, total_new_tokens_generated, args.batch_size, args.benchmark_cycles)) |
|
|
|
def get_args() -> argparse.Namespace: |
|
parser = get_argument_parser() |
|
group = parser.add_argument_group(title='launch config') |
|
group.add_argument('--benchmark_cycles', type=int, default=0, help='additionally run benchmark') |
|
group.add_argument('--local_rank', required=False, type=int, help='used by dist launchers') |
|
group.add_argument('--batch_size', default=1, type=int, help='batch size') |
|
group.add_argument('--cpu_offload', action='store_true', help='whether to activate CPU offload for DS ZeRO') |
|
args = parse_args(parser) |
|
launched_with_deepspeed = args.deployment_framework in [DS_INFERENCE, DS_ZERO] |
|
assert args.max_batch_size is None, 'max_batch_size is not supported with benchmark'
|
if not launched_with_deepspeed: |
|
assert args.local_rank is None, 'local_rank must be None if not launched with DeepSpeed'
|
if args.cpu_offload: |
|
assert args.deployment_framework == DS_ZERO, 'cpu_offload only works with DS_ZeRO' |
|
return args |
|
|
|
def main() -> None: |
|
args = get_args() |
|
start_inference_engine(args.deployment_framework) |
|
benchmark_end_to_end(args) |
|
if __name__ == '__main__': |
|
main() |
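
# Illustrative invocation (a sketch; --batch_size, --benchmark_cycles and --cpu_offload
# are defined above, while the remaining flag spellings come from utils.get_argument_parser,
# which is not shown here, so they are assumptions mirroring the attribute names used above):
#
#     python -m inference_server.benchmark --model_name bigscience/bloom-560m \
#         --model_class AutoModelForCausalLM --dtype fp16 \
#         --deployment_framework hf_accelerate --batch_size 4 --benchmark_cycles 5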
|
|
|
# File: transformers-bloom-inference-main/inference_server/cli.py |
|
import argparse |
|
import json |
|
import sys |
|
from .model_handler import ModelDeployment |
|
from .utils import get_argument_parser, parse_args, print_rank_0 |
|
|
|
def get_args() -> argparse.Namespace: |
|
parser = get_argument_parser() |
|
args = parse_args(parser) |
|
return args |
|
|
|
def main() -> None: |
|
args = get_args() |
|
model = ModelDeployment(args, True) |
|
generate_kwargs = args.generate_kwargs |
|
while True: |
|
input_text = input('Input text: ') |
|
if input('change generate_kwargs? [y/n] ') == 'y': |
|
while True: |
|
try: |
|
generate_kwargs = json.loads(input('Generate kwargs: ')) |
|
break |
|
except Exception as e: |
|
(e_type, e_message, _) = sys.exc_info() |
|
print('error =', e_type.__name__) |
|
print('message =', e_message) |
|
continue |
|
response = model.generate(text=[input_text], generate_kwargs=generate_kwargs) |
|
print_rank_0('Output text:', response.text[0]) |
|
print_rank_0('Generated tokens:', response.num_generated_tokens[0]) |
|
if __name__ == '__main__': |
|
main() |
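
# Example of the generate_kwargs JSON that the prompt above accepts (the keys are
# standard transformers generation arguments; the specific values are illustrative):
#
#     {"max_new_tokens": 40, "do_sample": true, "top_k": 50, "top_p": 0.9}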
|
|
|
# File: transformers-bloom-inference-main/inference_server/download_model.py |
|
import argparse |
|
from inference_server.models import get_hf_model_class |
|
from transformers import AutoConfig, AutoTokenizer |
|
|
|
def get_args() -> argparse.Namespace: |
|
parser = argparse.ArgumentParser() |
|
parser.add_argument('--model_name', type=str, required=True, help='model to use') |
|
parser.add_argument('--model_class', type=str, required=True, help='model class to use') |
|
args = parser.parse_args() |
|
return args |
|
|
|
def main() -> None: |
|
args = get_args() |
|
print('downloading', args.model_name) |
|
AutoConfig.from_pretrained(args.model_name) |
|
AutoTokenizer.from_pretrained(args.model_name) |
|
get_hf_model_class(args.model_class).from_pretrained(args.model_name) |
|
if __name__ == '__main__': |
|
main() |
|
|
|
# File: transformers-bloom-inference-main/inference_server/model_handler/deployment.py |
|
"""""" |
|
import argparse |
|
import asyncio |
|
import subprocess |
|
import time |
|
from typing import List |
|
import grpc |
|
from ..constants import DS_INFERENCE, DS_ZERO |
|
from ..models import get_model_class, load_tokenizer |
|
from ..utils import ForwardRequest, ForwardResponse, GenerateResponse, TokenizeRequest, TokenizeResponse, create_generate_request, get_cuda_visible_devices, get_str_dtype, get_world_size, print_rank_0 |
|
from .grpc_utils.pb import generation_pb2, generation_pb2_grpc |
|
|
|
class ModelDeployment: |
|
|
|
def __init__(self, args: argparse.Namespace, grpc_allowed: bool=False): |
|
self.cuda_visible_devices = get_cuda_visible_devices() |
|
self.num_gpus = get_world_size() |
|
self.use_grpc_server = self.should_use_grpc(args.deployment_framework, grpc_allowed) |
|
if self.use_grpc_server: |
|
self.tokenizer = load_tokenizer(args.model_name) |
|
self.initialize_ports() |
|
self.dtype_proto_field = {str: 'svalue', int: 'ivalue', float: 'fvalue', bool: 'bvalue'} |
|
self._initialize_service(args) |
|
self._wait_until_server_is_live() |
|
self.asyncio_loop = asyncio.get_event_loop() |
|
self._initialize_grpc_client() |
|
else: |
|
self.model = get_model_class(args.deployment_framework)(args) |
|
print_rank_0('model loaded') |
|
|
|
def should_use_grpc(self, deployment_framework: str, grpc_allowed: bool) -> bool: |
|
if grpc_allowed and get_world_size() > 1: |
|
return deployment_framework in [DS_INFERENCE, DS_ZERO] |
|
return False |
|
|
|
def initialize_ports(self): |
|
self.ports = [] |
|
for i in range(self.num_gpus): |
|
self.ports.append(50950 + self.cuda_visible_devices[i]) |
|
|
|
def _is_socket_open(self, port): |
|
import socket |
|
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
|
result = sock.connect_ex(('0.0.0.0', port)) |
|
sock.close() |
|
return result == 0 |
|
|
|
def _is_server_process_alive(self): |
|
if self.process is None: |
|
return True |
|
try: |
|
self.process.wait(1) |
|
except subprocess.TimeoutExpired:  # wait() timed out, so the server process is still running
|
is_alive = True |
|
else: |
|
is_alive = False |
|
return is_alive |
|
|
|
def _wait_until_server_is_live(self): |
|
sockets_open = False |
|
while not sockets_open: |
|
sockets_open = self._is_socket_open(self.ports[0]) |
|
process_alive = self._is_server_process_alive() |
|
if not process_alive: |
|
raise RuntimeError('server crashed for some reason, unable to proceed') |
|
time.sleep(4) |
|
print_rank_0('waiting for server to start...') |
|
print_rank_0(f'server has started on {self.ports[0]}') |
|
|
|
def dict_to_proto(self, generate_kwargs: dict) -> dict: |
|
result = {} |
|
for (k, v) in generate_kwargs.items(): |
|
if v is not None: |
|
x = generation_pb2.Value() |
|
setattr(x, self.dtype_proto_field[type(v)], v) |
|
result[k] = x |
|
return result |
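
# Example of the mapping performed by dict_to_proto (illustrative values):
#
#     {"max_new_tokens": 40, "temperature": 0.8, "do_sample": True}
#     -> {"max_new_tokens": Value(ivalue=40),
#         "temperature":    Value(fvalue=0.8),
#         "do_sample":      Value(bvalue=True)}
#
# i.e. each Python value is routed to the matching oneof field of generation.Value.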
|
|
|
def _initialize_service(self, args: argparse.Namespace): |
|
if self._is_socket_open(self.ports[0]): |
|
raise RuntimeError(f'Server is already running on port {self.ports}, please shutdown or use different port.') |
|
if args.deployment_framework in [DS_INFERENCE, DS_ZERO]: |
|
ports = ' '.join(map(str, self.ports)) |
|
cmd = f'inference_server.model_handler.launch --model_name {args.model_name} --deployment_framework {args.deployment_framework} --dtype {get_str_dtype(args.dtype)} --port {ports} --model_class {args.model_class}' |
|
if args.max_batch_size is not None: |
|
cmd += f' --max_batch_size {args.max_batch_size}' |
|
if args.max_input_length is not None: |
|
cmd += f' --max_input_length {args.max_input_length}' |
|
master_port = 29500 + min(self.cuda_visible_devices) |
|
cuda_visible_devices = ','.join(map(str, self.cuda_visible_devices)) |
|
cmd = f'deepspeed --master_port {master_port} --include localhost:{cuda_visible_devices} --module {cmd}' |
|
else: |
|
raise NotImplementedError(f'unsupported deployment_framework: {args.deployment_framework}') |
|
cmd = cmd.split(' ') |
|
self.process = subprocess.Popen(cmd) |
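
# Example of the port arithmetic used above (illustrative, assuming get_cuda_visible_devices
# returns the visible device ids): with CUDA_VISIBLE_DEVICES=2,3 the gRPC ports become
# [50952, 50953] (50950 + device id) and the DeepSpeed master_port becomes 29502
# (29500 + the smallest device id).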
|
|
|
def _initialize_grpc_client(self): |
|
self.stubs = [] |
|
for i in self.ports: |
|
channel = grpc.aio.insecure_channel(f'localhost:{i}') |
|
stub = generation_pb2_grpc.GenerationServiceStub(channel) |
|
self.stubs.append(stub) |
|
|
|
async def generate_in_tensor_parallel(self, text: List[str], generate_kwargs: dict): |
|
responses = [] |
|
for i in range(self.num_gpus): |
|
responses.append(self.asyncio_loop.create_task(self.generate_async(i, text, generate_kwargs))) |
|
await responses[0] |
|
return responses[0] |
|
|
|
async def generate_async(self, stub_id: int, text: List[str], generate_kwargs: dict): |
|
req = generation_pb2.GenerationRequestProto(texts=text, generate_kwargs=generate_kwargs) |
|
response = await self.stubs[stub_id].Generate(req) |
|
return response |
|
|
|
async def forward_in_tensor_parallel(self, conditioning_text: List[str], response: List[str]): |
|
responses = [] |
|
for i in range(self.num_gpus): |
|
responses.append(self.asyncio_loop.create_task(self.forward_async(i, conditioning_text, response))) |
|
await responses[0] |
|
return responses[0] |
|
|
|
async def forward_async(self, stub_id: int, conditioning_text: List[str], response: List[str]): |
|
req = generation_pb2.ForwardRequestProto(conditioning_text=conditioning_text, response=response) |
|
response = await self.stubs[stub_id].Forward(req) |
|
return response |
|
|
|
def generate(self, **kwargs) -> GenerateResponse: |
|
if self.use_grpc_server: |
|
if 'request' in kwargs: |
|
text = kwargs['request'].text |
|
generate_kwargs = kwargs['request'].get_generate_kwargs() |
|
else: |
|
text = kwargs['text'] |
|
generate_kwargs = kwargs['generate_kwargs'] |
|
generate_kwargs = self.dict_to_proto(generate_kwargs) |
|
response = self.asyncio_loop.run_until_complete(self.generate_in_tensor_parallel(text, generate_kwargs)).result() |
|
if response.error: |
|
raise Exception(response.error) |
|
else: |
|
return GenerateResponse(text=[r for r in response.texts], num_generated_tokens=[n for n in response.num_generated_tokens]) |
|
else: |
|
if 'request' in kwargs: |
|
request = kwargs['request'] |
|
else: |
|
request = create_generate_request(**kwargs) |
|
response = self.model.generate(request) |
|
if isinstance(response, Exception): |
|
raise response |
|
else: |
|
return response |
|
|
|
def forward(self, request: ForwardRequest) -> ForwardResponse: |
|
if self.use_grpc_server: |
|
response = self.asyncio_loop.run_until_complete(self.forward_in_tensor_parallel(request.conditioning_text, request.response)).result() |
|
if response.error: |
|
raise Exception(response.error) |
|
else: |
|
return ForwardResponse(nll=response.nll) |
|
else: |
|
response = self.model.forward(request) |
|
if isinstance(response, Exception): |
|
raise response |
|
else: |
|
return response |
|
|
|
def tokenize(self, request: TokenizeRequest) -> TokenizeResponse: |
|
if self.use_grpc_server: |
|
response = self.tokenizer(request.text, padding=request.padding) |
|
response = TokenizeResponse(token_ids=response.input_ids, attention_mask=response.attention_mask) |
|
else: |
|
response = self.model.tokenize(request) |
|
return response |
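
# Minimal usage sketch of ModelDeployment (argument values are assumptions):
#
#     deployment = ModelDeployment(args, grpc_allowed=True)
#     response = deployment.generate(text=['DeepSpeed is a'],
#                                    generate_kwargs={'max_new_tokens': 20})
#     print(response.text[0], response.num_generated_tokens[0])
#
# With a single GPU, or a non-DeepSpeed framework, the model is loaded in-process;
# otherwise one gRPC server is launched per rank and generate() fans the request out.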
|
|
|
# File: transformers-bloom-inference-main/inference_server/model_handler/grpc_utils/generation_server.py |
|
import os |
|
from concurrent import futures |
|
import torch |
|
import grpc |
|
from ...models import Model |
|
from ...utils import ForwardRequest, TokenizeRequest, create_generate_request, print_rank_0 |
|
from .pb import generation_pb2, generation_pb2_grpc |
|
|
|
class GenerationServer(generation_pb2_grpc.GenerationServiceServicer): |
|
|
|
def __init__(self, model: Model) -> None: |
|
self.model = model |
|
|
|
def _unpack_proto_query_kwargs(self, query_kwargs): |
|
query_kwargs = {k: getattr(v, v.WhichOneof('oneof_values')) for (k, v) in query_kwargs.items()} |
|
return query_kwargs |
|
|
|
def Generate(self, request, context): |
|
text = [r for r in request.texts] |
|
generate_kwargs = self._unpack_proto_query_kwargs(request.generate_kwargs) |
|
request = create_generate_request(text=text, generate_kwargs=generate_kwargs) |
|
local_rank = int(os.getenv('LOCAL_RANK', '0')) |
|
torch.cuda.set_device(local_rank) |
|
self.model.input_device = local_rank |
|
response = self.model.generate(request) |
|
if isinstance(response, Exception): |
|
response = generation_pb2.GenerationResponseProto(error=str(response), is_encoder_decoder=self.model.is_encoder_decoder)
|
else: |
|
response = generation_pb2.GenerationResponseProto(texts=response.text, num_generated_tokens=response.num_generated_tokens, is_encoder_decoder=response.is_encoder_decoder) |
|
return response |
|
|
|
def Forward(self, request, context): |
|
conditioning_text = [r for r in request.conditioning_text] |
|
response = [r for r in request.response] |
|
request = ForwardRequest(conditioning_text=conditioning_text, response=response) |
|
local_rank = int(os.getenv('LOCAL_RANK', '0')) |
|
torch.cuda.set_device(local_rank) |
|
self.model.input_device = local_rank |
|
response = self.model.forward(request) |
|
if isinstance(response, Exception): |
|
response = generation_pb2.ForwardResponseProto(error=str(response), is_encoder_decoder=self.model.is_encoder_decoder)
|
else: |
|
response = generation_pb2.ForwardResponseProto(nll=response.nll, is_encoder_decoder=response.is_encoder_decoder) |
|
return response |
|
|
|
def serve(inference_pipeline, port): |
|
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1)) |
|
generation_pb2_grpc.add_GenerationServiceServicer_to_server(GenerationServer(inference_pipeline), server) |
|
server.add_insecure_port(f'[::]:{port}') |
|
print_rank_0('About to start server') |
|
server.start() |
|
print_rank_0('Started') |
|
server.wait_for_termination() |
|
|
|
# File: transformers-bloom-inference-main/inference_server/model_handler/grpc_utils/pb/generation_pb2.py |
|
"""""" |
|
from google.protobuf import descriptor as _descriptor |
|
from google.protobuf import descriptor_pool as _descriptor_pool |
|
from google.protobuf import symbol_database as _symbol_database |
|
from google.protobuf.internal import builder as _builder |
|
_sym_db = _symbol_database.Default() |
|
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10generation.proto\x12\ngeneration"_\n\x05Value\x12\x10\n\x06svalue\x18\x01 \x01(\tH\x00\x12\x10\n\x06ivalue\x18\x02 \x01(\x03H\x00\x12\x10\n\x06fvalue\x18\x03 \x01(\x02H\x00\x12\x10\n\x06bvalue\x18\x04 \x01(\x08H\x00B\x0e\n\x0coneof_values"\xc2\x01\n\x16GenerationRequestProto\x12\r\n\x05texts\x18\x01 \x03(\t\x12O\n\x0fgenerate_kwargs\x18\x02 \x03(\x0b26.generation.GenerationRequestProto.GenerateKwargsEntry\x1aH\n\x13GenerateKwargsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b2\x11.generation.Value:\x028\x01"q\n\x17GenerationResponseProto\x12\r\n\x05texts\x18\x01 \x03(\t\x12\x1c\n\x14num_generated_tokens\x18\x02 \x03(\x05\x12\r\n\x05error\x18\x03 \x01(\t\x12\x1a\n\x12is_encoder_decoder\x18\x04 \x01(\x08"B\n\x13ForwardRequestProto\x12\x19\n\x11conditioning_text\x18\x01 \x03(\t\x12\x10\n\x08response\x18\x02 \x03(\t"N\n\x14ForwardResponseProto\x12\x0b\n\x03nll\x18\x01 \x01(\x02\x12\r\n\x05error\x18\x02 \x01(\t\x12\x1a\n\x12is_encoder_decoder\x18\x03 \x01(\x082\xba\x01\n\x11GenerationService\x12U\n\x08Generate\x12".generation.GenerationRequestProto\x1a#.generation.GenerationResponseProto"\x00\x12N\n\x07Forward\x12\x1f.generation.ForwardRequestProto\x1a .generation.ForwardResponseProto"\x00b\x06proto3') |
|
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) |
|
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'generation_pb2', globals()) |
|
if _descriptor._USE_C_DESCRIPTORS == False: |
|
DESCRIPTOR._options = None |
|
_GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._options = None |
|
_GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._serialized_options = b'8\x01' |
|
_VALUE._serialized_start = 32 |
|
_VALUE._serialized_end = 127 |
|
_GENERATIONREQUESTPROTO._serialized_start = 130 |
|
_GENERATIONREQUESTPROTO._serialized_end = 324 |
|
_GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._serialized_start = 252 |
|
_GENERATIONREQUESTPROTO_GENERATEKWARGSENTRY._serialized_end = 324 |
|
_GENERATIONRESPONSEPROTO._serialized_start = 326 |
|
_GENERATIONRESPONSEPROTO._serialized_end = 439 |
|
_FORWARDREQUESTPROTO._serialized_start = 441 |
|
_FORWARDREQUESTPROTO._serialized_end = 507 |
|
_FORWARDRESPONSEPROTO._serialized_start = 509 |
|
_FORWARDRESPONSEPROTO._serialized_end = 587 |
|
_GENERATIONSERVICE._serialized_start = 590 |
|
_GENERATIONSERVICE._serialized_end = 776 |
|
|
|
# File: transformers-bloom-inference-main/inference_server/model_handler/grpc_utils/pb/generation_pb2_grpc.py |
|
"""""" |
|
import grpc |
|
from . import generation_pb2 as generation__pb2 |
|
|
|
class GenerationServiceStub(object): |
|
|
|
def __init__(self, channel): |
|
self.Generate = channel.unary_unary('/generation.GenerationService/Generate', request_serializer=generation__pb2.GenerationRequestProto.SerializeToString, response_deserializer=generation__pb2.GenerationResponseProto.FromString) |
|
self.Forward = channel.unary_unary('/generation.GenerationService/Forward', request_serializer=generation__pb2.ForwardRequestProto.SerializeToString, response_deserializer=generation__pb2.ForwardResponseProto.FromString) |
|
|
|
class GenerationServiceServicer(object): |
|
|
|
def Generate(self, request, context): |
|
context.set_code(grpc.StatusCode.UNIMPLEMENTED) |
|
context.set_details('Method not implemented!') |
|
raise NotImplementedError('Method not implemented!') |
|
|
|
def Forward(self, request, context): |
|
context.set_code(grpc.StatusCode.UNIMPLEMENTED) |
|
context.set_details('Method not implemented!') |
|
raise NotImplementedError('Method not implemented!') |
|
|
|
def add_GenerationServiceServicer_to_server(servicer, server): |
|
rpc_method_handlers = {'Generate': grpc.unary_unary_rpc_method_handler(servicer.Generate, request_deserializer=generation__pb2.GenerationRequestProto.FromString, response_serializer=generation__pb2.GenerationResponseProto.SerializeToString), 'Forward': grpc.unary_unary_rpc_method_handler(servicer.Forward, request_deserializer=generation__pb2.ForwardRequestProto.FromString, response_serializer=generation__pb2.ForwardResponseProto.SerializeToString)} |
|
generic_handler = grpc.method_handlers_generic_handler('generation.GenerationService', rpc_method_handlers) |
|
server.add_generic_rpc_handlers((generic_handler,)) |
|
|
|
class GenerationService(object): |
|
|
|
@staticmethod |
|
def Generate(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): |
|
return grpc.experimental.unary_unary(request, target, '/generation.GenerationService/Generate', generation__pb2.GenerationRequestProto.SerializeToString, generation__pb2.GenerationResponseProto.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) |
|
|
|
@staticmethod |
|
def Forward(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): |
|
return grpc.experimental.unary_unary(request, target, '/generation.GenerationService/Forward', generation__pb2.ForwardRequestProto.SerializeToString, generation__pb2.ForwardResponseProto.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) |
|
|
|
# File: transformers-bloom-inference-main/inference_server/model_handler/launch.py |
|
"""""" |
|
import argparse |
|
import torch.distributed as dist |
|
from ..models import get_model_class, start_inference_engine |
|
from ..utils import get_argument_parser, parse_args |
|
from .grpc_utils.generation_server import serve |
|
|
|
def get_args() -> argparse.Namespace: |
|
parser = get_argument_parser() |
|
group = parser.add_argument_group(title='launch config') |
|
group.add_argument('--local_rank', required=False, type=int, help='used by dist launchers') |
|
group.add_argument('--cpu_offload', action='store_true', help='whether to activate CPU offload for DS ZeRO') |
|
group.add_argument('--ports', nargs='+', help='GRPC ports') |
|
args = parse_args(parser) |
|
return args |
|
|
|
def main(): |
|
args = get_args() |
|
start_inference_engine(args.deployment_framework) |
|
model = get_model_class(args.deployment_framework)(args) |
|
serve(model, args.ports[dist.get_rank()]) |
|
if __name__ == '__main__': |
|
main() |
|
|
|
# File: transformers-bloom-inference-main/inference_server/models/__init__.py |
|
from ..constants import DS_INFERENCE, DS_ZERO, HF_ACCELERATE, HF_CPU |
|
from .model import Model, get_hf_model_class, load_tokenizer |
|
|
|
def get_model_class(deployment_framework: str): |
|
if deployment_framework == HF_ACCELERATE: |
|
from .hf_accelerate import HFAccelerateModel |
|
return HFAccelerateModel |
|
elif deployment_framework == HF_CPU: |
|
from .hf_cpu import HFCPUModel |
|
return HFCPUModel |
|
elif deployment_framework == DS_INFERENCE: |
|
from .ds_inference import DSInferenceModel |
|
return DSInferenceModel |
|
elif deployment_framework == DS_ZERO: |
|
from .ds_zero import DSZeROModel |
|
return DSZeROModel |
|
else: |
|
raise ValueError(f'Unknown deployment framework {deployment_framework}') |
|
|
|
def start_inference_engine(deployment_framework: str) -> None: |
|
if deployment_framework in [DS_INFERENCE, DS_ZERO]: |
|
import deepspeed |
|
deepspeed.init_distributed('nccl') |
|
|
|
# File: transformers-bloom-inference-main/inference_server/models/ds_inference.py |
|
import glob |
|
import io |
|
import json |
|
import os |
|
from argparse import Namespace |
|
from functools import partial |
|
import torch |
|
import deepspeed |
|
from huggingface_hub import try_to_load_from_cache |
|
from transformers import AutoConfig |
|
from ..utils import get_world_size, run_rank_n |
|
from .model import Model, get_hf_model_class |
|
|
|
class DSInferenceModel(Model): |
|
|
|
def __init__(self, args: Namespace) -> None: |
|
super().__init__(args) |
|
with deepspeed.OnDevice(dtype=torch.float16, device='meta'): |
|
self.model = get_hf_model_class(args.model_class).from_config(AutoConfig.from_pretrained(args.model_name), torch_dtype=torch.bfloat16) |
|
self.model = self.model.eval() |
|
downloaded_model_path = get_model_path(args.model_name) |
|
if args.dtype in [torch.float16, torch.int8]: |
|
checkpoints_json = os.path.join(downloaded_model_path, 'ds_inference_config.json') |
|
if os.path.isfile(checkpoints_json): |
|
self.model = deepspeed.init_inference(self.model, mp_size=get_world_size(), base_dir=downloaded_model_path, dtype=args.dtype, checkpoint=checkpoints_json, replace_with_kernel_inject=True) |
|
else: |
|
with TemporaryCheckpointsJSON(downloaded_model_path) as checkpoints_json: |
|
self.model = deepspeed.init_inference(self.model, mp_size=get_world_size(), base_dir=downloaded_model_path, dtype=args.dtype, checkpoint=checkpoints_json, replace_with_kernel_inject=True) |
|
elif args.dtype == torch.bfloat16: |
|
raise NotImplementedError('bfloat16 is not yet supported') |
|
self.model = self.model.module |
|
self.input_device = torch.cuda.current_device() |
|
self.post_init(args.model_name) |
|
|
|
class TemporaryCheckpointsJSON: |
|
|
|
def __init__(self, model_path: str): |
|
self.tmp_directory = 'tmp' |
|
self.tmp_file = os.path.join(self.tmp_directory, 'checkpoints.json') |
|
self.model_path = model_path |
|
|
|
def write_checkpoints_json(self) -> None: |
|
print(self.model_path) |
|
with io.open(self.tmp_file, 'w', encoding='utf-8') as f: |
|
data = {'type': 'BLOOM', 'checkpoints': glob.glob(f'{self.model_path}/*.bin'), 'version': 1.0} |
|
json.dump(data, f) |
|
|
|
def __enter__(self): |
|
run_rank_n(os.makedirs, barrier=True)(self.tmp_directory, exist_ok=True) |
|
run_rank_n(self.write_checkpoints_json, barrier=True)() |
|
return self.tmp_file |
|
|
|
def __exit__(self, type, value, traceback): |
|
return |
|
|
|
def get_model_path(model_name: str): |
|
try: |
|
config_file = 'config.json' |
|
config_path = try_to_load_from_cache(model_name, config_file, cache_dir=os.getenv('TRANSFORMERS_CACHE')) |
|
if config_path is None: |
|
return model_name |
|
else: |
|
return os.path.dirname(config_path) |
|
except Exception:
|
return model_name |
|
|
|
# File: transformers-bloom-inference-main/inference_server/models/ds_zero.py |
|
from argparse import Namespace |
|
import torch |
|
import deepspeed |
|
from transformers import AutoConfig |
|
from transformers.deepspeed import HfDeepSpeedConfig |
|
from ..utils import get_world_size |
|
from .model import Model, get_hf_model_class |
|
|
|
class DSZeROModel(Model): |
|
|
|
def __init__(self, args: Namespace) -> None: |
|
super().__init__(args) |
|
config = AutoConfig.from_pretrained(args.model_name) |
|
train_micro_batch_size_per_gpu = 1 |
|
train_batch_size = train_micro_batch_size_per_gpu * get_world_size() |
|
ds_config = {
    'fp16': {'enabled': args.dtype == torch.float16},
    'bf16': {'enabled': args.dtype == torch.bfloat16},
    'zero_optimization': {
        'stage': 3,
        'overlap_comm': True,
        'contiguous_gradients': True,
        'reduce_bucket_size': config.hidden_size * config.hidden_size,
        'stage3_prefetch_bucket_size': 0.9 * config.hidden_size * config.hidden_size,
        'stage3_param_persistence_threshold': 0,
    },
    'steps_per_print': 2000,
    'train_batch_size': train_batch_size,
    'train_micro_batch_size_per_gpu': train_micro_batch_size_per_gpu,
    'wall_clock_breakdown': False,
}
|
if args.cpu_offload: |
|
ds_config['zero_optimization']['offload_param'] = {'device': 'cpu', 'pin_memory': True} |
|
dschf = HfDeepSpeedConfig(ds_config) |
|
self.model = get_hf_model_class(args.model_class).from_pretrained(args.model_name, torch_dtype=args.dtype) |
|
self.model = self.model.eval() |
|
self.model = deepspeed.initialize(model=self.model, config_params=ds_config)[0] |
|
self.model.module.eval() |
|
self.model = self.model.module |
|
self.input_device = torch.cuda.current_device() |
|
self.post_init(args.model_name) |
|
|
|
# File: transformers-bloom-inference-main/inference_server/models/hf_accelerate.py |
|
from argparse import Namespace |
|
import torch |
|
from ..utils import get_world_size |
|
from .model import Model, get_hf_model_class |
|
|
|
class HFAccelerateModel(Model): |
|
|
|
def __init__(self, args: Namespace) -> None: |
|
super().__init__(args) |
|
kwargs = {'pretrained_model_name_or_path': args.model_name, 'device_map': 'auto'} |
|
if get_world_size() > 1: |
|
kwargs['device_map'] = 'balanced_low_0' |
|
if args.dtype == torch.int8: |
|
kwargs['load_in_8bit'] = True |
|
else: |
|
kwargs['torch_dtype'] = args.dtype |
|
self.model = get_hf_model_class(args.model_class).from_pretrained(**kwargs) |
|
self.model.requires_grad_(False) |
|
self.model.eval() |
|
self.input_device = 'cuda:0' |
|
self.post_init(args.model_name) |
|
|
|
# File: transformers-bloom-inference-main/inference_server/models/model.py |
|
import argparse |
|
import copy |
|
from typing import List, Union |
|
import torch |
|
import transformers |
|
from transformers import AutoConfig, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig |
|
from ..utils import ForwardRequest, ForwardResponse, GenerateRequest, GenerateResponse, TokenizeRequest, TokenizeResponse |
|
|
|
class Model: |
|
|
|
def __init__(self, args: argparse.Namespace) -> None: |
|
self.model = None |
|
self.input_device = None |
|
self.max_input_length = args.max_input_length |
|
self.max_batch_size = args.max_batch_size |
|
|
|
def post_init(self, model_name: str) -> None: |
|
self.is_encoder_decoder = AutoConfig.from_pretrained(model_name).is_encoder_decoder |
|
self.generation_config = GenerationConfig.from_model_config(AutoConfig.from_pretrained(model_name)) |
|
self.tokenizer = load_tokenizer(model_name) |
|
self.pad = self.tokenizer.pad_token_id |
|
self.prefix_token_id = self.tokenizer('A')['input_ids'][0] |
|
|
|
def get_generation_config(self, request: GenerateRequest) -> GenerationConfig: |
|
generation_config = copy.deepcopy(self.generation_config) |
|
request = dict(request) |
|
request_filtered = {} |
|
for (key, value) in request.items(): |
|
if value is not None and key not in ['text', 'remove_input_from_output']: |
|
request_filtered[key] = value |
|
request_filtered['return_dict_in_generate'] = True |
|
generation_config.update(**request_filtered) |
|
return generation_config |
|
|
|
def generate(self, request: GenerateRequest) -> Union[GenerateResponse, Exception]: |
|
try: |
|
batch_size = len(request.text) |
|
check_batch_size(batch_size, self.max_batch_size) |
|
input_tokens = self.tokenizer(request.text, return_tensors='pt', padding=True) |
|
max_input_length_in_batch = input_tokens.input_ids[0].shape[0] |
|
check_max_input_length(max_input_length_in_batch, self.max_input_length) |
|
for t in input_tokens: |
|
if torch.is_tensor(input_tokens[t]): |
|
input_tokens[t] = input_tokens[t].to(self.input_device) |
|
num_input_tokens = input_tokens['input_ids'].shape[1] |
|
generation_config = self.get_generation_config(request) |
|
output = self.model.generate(**input_tokens, generation_config=generation_config) |
|
output_tokens = output.sequences |
|
if self.is_encoder_decoder: |
|
num_generated_tokens = (output_tokens != self.pad).sum(dim=-1).tolist() |
|
generated_text = self.tokenizer.batch_decode(output_tokens, skip_special_tokens=True) |
|
else: |
|
generated_tokens = output_tokens[:, num_input_tokens:] |
|
num_generated_tokens = (generated_tokens != self.pad).sum(dim=-1).tolist() |
|
if request.remove_input_from_output: |
|
prefix_to_add = torch.tensor([[self.prefix_token_id]] * batch_size).to(self.input_device) |
|
generated_tokens = torch.cat([prefix_to_add, generated_tokens], dim=1) |
|
generated_text = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) |
|
generated_text = [i[1:] for i in generated_text] |
|
else: |
|
generated_text = self.tokenizer.batch_decode(output_tokens, skip_special_tokens=True) |
|
return GenerateResponse(text=generated_text, num_generated_tokens=num_generated_tokens, is_encoder_decoder=self.is_encoder_decoder) |
|
except Exception as exception: |
|
return exception |
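
# Note on the remove_input_from_output branch in generate() above: the id of 'A' is
# prepended to the freshly generated tokens before decoding and the leading character
# is then stripped from the decoded string. This appears to guard against the tokenizer
# dropping or altering leading whitespace when only the continuation (rather than the
# full prompt + continuation) is decoded.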
|
|
|
def forward(self, request: ForwardRequest) -> Union[ForwardResponse, Exception]: |
|
|
|
def prepare_tensors(conditioning_tokens: List[List[int]], response_tokens: List[List[int]]): |
|
bs = len(conditioning_tokens) |
|
input_ids = [conditioning_tokens[i] + response_tokens[i] for i in range(bs)] |
|
attention_mask = [[1] * (len(conditioning_tokens[i]) + len(response_tokens[i])) for i in range(bs)] |
|
labels = [[-100] * len(conditioning_tokens[i]) + response_tokens[i] for i in range(bs)] |
|
input_ids = pad(input_ids, self.tokenizer.pad_token_id) |
|
attention_mask = pad(attention_mask, 0) |
|
labels = pad(labels, -100) |
|
return {'input_ids': torch.tensor(input_ids), 'attention_mask': torch.tensor(attention_mask), 'labels': torch.tensor(labels)} |
|
|
|
def pad(arrays: list, padding: int, max_length: int=None): |
|
if max_length is None: |
|
max_length = max(list(map(len, arrays))) |
|
arrays = [[padding] * (max_length - len(array)) + array for array in arrays] |
|
return arrays |
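
# Worked example of the pad helper above (illustrative): pad([[1, 2], [3]], 0)
# left-pads every row to the longest length and returns [[1, 2], [0, 3]],
# matching the left-padded tokenizer used elsewhere in this file.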
|
try: |
|
batch_size = len(request.conditioning_text) |
|
check_batch_size(batch_size, self.max_batch_size) |
|
conditioning_tokens = self.tokenizer(request.conditioning_text)['input_ids'] |
|
response_tokens = self.tokenizer(request.response)['input_ids'] |
|
max_length_in_batch = max(len(c) + len(r) for (c, r) in zip(conditioning_tokens, response_tokens))
|
check_max_input_length(max_length_in_batch, self.max_input_length) |
|
input_tokens = prepare_tensors(conditioning_tokens, response_tokens) |
|
for t in input_tokens: |
|
if torch.is_tensor(input_tokens[t]): |
|
input_tokens[t] = input_tokens[t].to(self.input_device) |
|
loss = self.model(**input_tokens).loss |
|
return ForwardResponse(nll=loss.item(), is_encoder_decoder=self.is_encoder_decoder) |
|
except Exception as exception: |
|
return exception |
|
|
|
def tokenize(self, request: TokenizeRequest) -> TokenizeResponse: |
|
return TokenizeResponse(token_ids=self.tokenizer(request.text).input_ids, is_encoder_decoder=self.is_encoder_decoder) |
|
|
|
def check_max_input_length(input_token_length: int, max_input_length: int) -> None: |
|
if max_input_length is None: |
|
return |
|
if input_token_length > max_input_length: |
|
raise Exception(f'max supported input length = {max_input_length} for now') |
|
|
|
def check_batch_size(batch_size: int, max_batch_size: int) -> None: |
|
if max_batch_size is None: |
|
return |
|
if batch_size > max_batch_size: |
|
raise Exception(f'max supported batch size = {max_batch_size} for now') |
|
|
|
def get_hf_model_class(model_class: str) -> Union[AutoModelForCausalLM, AutoModelForSeq2SeqLM]: |
|
return getattr(transformers, model_class) |
|
|
|
def load_tokenizer(model_name: str) -> AutoTokenizer: |
|
tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left') |
|
if tokenizer.pad_token_id is None: |
|
tokenizer.add_special_tokens({'pad_token': '[PAD]'}) |
|
return tokenizer |
|
|
|
# File: transformers-bloom-inference-main/inference_server/server.py |
|
import os |
|
from functools import partial |
|
from flask import Flask, request |
|
from flask_api import status |
|
from pydantic import BaseModel |
|
from .constants import HF_ACCELERATE |
|
from .model_handler.deployment import ModelDeployment |
|
from .utils import ForwardRequest, GenerateRequest, TokenizeRequest, get_exception_response, get_num_tokens_to_generate, get_torch_dtype, parse_bool, run_and_log_time |
|
|
|
class QueryID(BaseModel): |
|
generate_query_id: int = 0 |
|
tokenize_query_id: int = 0 |
|
forward_query_id: int = 0 |
|
|
|
class Args: |
|
|
|
def __init__(self) -> None: |
|
self.deployment_framework = os.getenv('DEPLOYMENT_FRAMEWORK', HF_ACCELERATE) |
|
self.model_name = os.getenv('MODEL_NAME') |
|
self.model_class = os.getenv('MODEL_CLASS') |
|
self.dtype = get_torch_dtype(os.getenv('DTYPE')) |
|
self.allowed_max_new_tokens = int(os.getenv('ALLOWED_MAX_NEW_TOKENS', 100)) |
|
self.max_input_length = int(os.getenv('MAX_INPUT_LENGTH', 512)) |
|
self.max_batch_size = int(os.getenv('MAX_BATCH_SIZE', 4)) |
|
self.debug = parse_bool(os.getenv('DEBUG', 'false')) |
|
args = Args() |
|
model = ModelDeployment(args, True) |
|
query_ids = QueryID() |
|
app = Flask(__name__) |
|
|
|
@app.route('/query_id/', methods=['GET']) |
|
def query_id(): |
|
return (query_ids.dict(), status.HTTP_200_OK) |
|
|
|
@app.route('/tokenize/', methods=['POST']) |
|
def tokenize(): |
|
try: |
|
x = request.get_json() |
|
x = TokenizeRequest(**x) |
|
(response, total_time_taken) = run_and_log_time(partial(model.tokenize, request=x)) |
|
response.query_id = query_ids.tokenize_query_id |
|
query_ids.tokenize_query_id += 1 |
|
response.total_time_taken = '{:.2f} msecs'.format(total_time_taken * 1000) |
|
return (response.dict(), status.HTTP_200_OK) |
|
except Exception: |
|
response = get_exception_response(query_ids.tokenize_query_id, args.debug) |
|
query_ids.tokenize_query_id += 1 |
|
return (response, status.HTTP_500_INTERNAL_SERVER_ERROR) |
|
|
|
@app.route('/generate/', methods=['POST']) |
|
def generate(): |
|
try: |
|
x = request.get_json() |
|
x = GenerateRequest(**x) |
|
x.max_new_tokens = get_num_tokens_to_generate(x.max_new_tokens, args.allowed_max_new_tokens) |
|
(response, total_time_taken) = run_and_log_time(partial(model.generate, request=x)) |
|
response.query_id = query_ids.generate_query_id |
|
query_ids.generate_query_id += 1 |
|
response.total_time_taken = '{:.2f} secs'.format(total_time_taken) |
|
return (response.dict(), status.HTTP_200_OK) |
|
except Exception: |
|
response = get_exception_response(query_ids.generate_query_id, args.debug) |
|
query_ids.generate_query_id += 1 |
|
return (response, status.HTTP_500_INTERNAL_SERVER_ERROR) |
|
|
|
@app.route('/forward/', methods=['POST']) |
|
def forward(): |
|
try: |
|
x = request.get_json() |
|
x = ForwardRequest(**x) |
|
if len(x.conditioning_text) != len(x.response): |
|
raise Exception('unequal number of elements in conditioning_text and response arguments') |
|
(response, total_time_taken) = run_and_log_time(partial(model.forward, request=x)) |
|
response.query_id = query_ids.forward_query_id |
|
query_ids.forward_query_id += 1 |
|
response.total_time_taken = '{:.2f} secs'.format(total_time_taken) |
|
return (response.dict(), status.HTTP_200_OK) |
|
except Exception: |
|
response = get_exception_response(query_ids.forward_query_id, args.debug) |
|
query_ids.forward_query_id += 1 |
|
return (response, status.HTTP_500_INTERNAL_SERVER_ERROR) |
|
|
|
# File: transformers-bloom-inference-main/server_request.py |
|
import argparse |
|
import requests |
|
|
|
def get_args() -> argparse.Namespace: |
|
parser = argparse.ArgumentParser() |
|
group = parser.add_argument_group(title='launch config') |
|
group.add_argument('--host', type=str, required=True, help='host address') |
|
group.add_argument('--port', type=int, required=True, help='port number') |
|
return parser.parse_args() |
|
|
|
def generate(url: str) -> None: |
|
url = url + '/generate/' |
|
request_body = {'text': ['DeepSpeed', 'DeepSpeed is a', 'DeepSpeed is a machine', 'DeepSpeed is a machine learning framework'], 'max_new_tokens': 40} |
|
response = requests.post(url=url, json=request_body, verify=False) |
|
print(response.json(), '\n') |
|
|
|
def tokenize(url: str) -> None: |
|
url = url + '/tokenize/' |
|
request_body = {'text': ['DeepSpeed is a', 'DeepSpeed is a machine learning framework']} |
|
response = requests.post(url=url, json=request_body, verify=False) |
|
print(response.json(), '\n') |
|
|
|
def forward(url: str) -> None: |
|
url = url + '/forward/' |
|
request_body = {'conditioning_text': ['DeepSpeed', 'DeepSpeed is a', 'DeepSpeed is a machine', 'DeepSpeed is a machine learning framework'], 'response': ['DeepSpeed', 'DeepSpeed is a', 'DeepSpeed is a machine', 'DeepSpeed is a machine learning framework']} |
|
response = requests.post(url=url, json=request_body, verify=False) |
|
print(response.json(), '\n') |
|
|
|
def query_id(url: str) -> None: |
|
url = url + '/query_id/' |
|
response = requests.get(url=url, verify=False) |
|
print(response.json(), '\n') |
|
|
|
def main(): |
|
args = get_args() |
|
url = 'http://{}:{}'.format(args.host, args.port) |
|
generate(url) |
|
tokenize(url) |
|
forward(url) |
|
query_id(url) |
|
if __name__ == '__main__': |
|
main() |
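
# The endpoints queried above respond with the fields set in inference_server/server.py;
# an illustrative /generate/ payload (values are assumptions, not captured output) looks like:
#
#     {"text": ["DeepSpeed is a machine learning framework for ..."],
#      "num_generated_tokens": [40],
#      "is_encoder_decoder": false,
#      "query_id": 0,
#      "total_time_taken": "2.15 secs"}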
|
|
|
# File: transformers-bloom-inference-main/ui.py |
|
import argparse |
|
import requests |
|
from fastapi import FastAPI, Request |
|
from fastapi.middleware.cors import CORSMiddleware |
|
from fastapi.responses import HTMLResponse, JSONResponse |
|
from fastapi.routing import APIRoute, Mount |
|
from fastapi.staticfiles import StaticFiles |
|
from fastapi.templating import Jinja2Templates |
|
from transformers import AutoTokenizer |
|
from uvicorn import run |
|
|
|
def get_args() -> argparse.Namespace: |
|
parser = argparse.ArgumentParser() |
|
group = parser.add_argument_group(title='launch config') |
|
group.add_argument('--ui_host', type=str, default='127.0.0.1', help='host address for UI') |
|
group.add_argument('--ui_port', type=int, default=5001, help='port number for UI') |
|
group.add_argument('--generation_backend_host', type=str, default='127.0.0.1', help='host address for generation server') |
|
group.add_argument('--generation_backend_port', type=int, default=5000, help='port number for generation server') |
|
return parser.parse_args() |
|
|
|
class Server: |
|
|
|
def __init__(self, args: argparse.Namespace): |
|
self.templates = Jinja2Templates(directory='templates') |
|
self.ui_host = args.ui_host |
|
self.ui_port = args.ui_port |
|
self.generation_backend_host = args.generation_backend_host |
|
self.generation_backend_port = args.generation_backend_port |
|
self.workers = 1 |
|
self.tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom') |
|
self.app = FastAPI(
    routes=[
        APIRoute('/', self.homepage, methods=['GET'], response_class=HTMLResponse),
        APIRoute('/generate/', self.generate, methods=['POST']),
        Mount('/static/', StaticFiles(directory='static'), name='static'),
    ],
    timeout=600,
)
|
self.prefix_checkpoints_list = None |
|
|
|
def homepage(self, request: Request) -> HTMLResponse: |
|
return self.templates.TemplateResponse('index.html', {'request': request}) |
|
|
|
def generate(self, request: dict) -> JSONResponse: |
|
response = requests.post(f'http://{self.generation_backend_host}:{self.generation_backend_port}/generate', json=request, verify=False) |
|
return JSONResponse(content=response.json()) |
|
|
|
def run(self): |
|
self.app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_credentials=True, allow_methods=['*'], allow_headers=['*']) |
|
run(self.app, host=self.ui_host, port=self.ui_port, workers=self.workers) |
|
|
|
def main() -> None: |
|
Server(get_args()).run() |
|
if __name__ == '__main__': |
|
main() |
|
|
|
|