alexmarques committed on
Commit a5278f0
1 Parent(s): be01205

Update README.md

Files changed (1)
README.md +2 -1
README.md CHANGED
@@ -47,6 +47,7 @@ from transformers import AutoTokenizer
 
 model_id = "neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a8"
 number_gpus = 1
+max_model_len = 8192
 
 sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)
 
@@ -59,7 +60,7 @@ messages = [
 
 prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
 
-llm = LLM(model=model_id, tensor_parallel_size=number_gpus)
+llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=max_model_len)
 
 outputs = llm.generate(prompts, sampling_params)
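
For context, the README's vLLM deployment snippet reads roughly as follows after this commit. This is a sketch only: the messages definition, the tokenizer loading, and the vllm import are not visible in the hunks above, so they are assumed here (a single example user turn and the standard vLLM imports).

from transformers import AutoTokenizer
from vllm import LLM, SamplingParams  # assumed import; only the transformers import appears in the hunk header

model_id = "neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a8"
number_gpus = 1
max_model_len = 8192  # added in this commit

sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)

tokenizer = AutoTokenizer.from_pretrained(model_id)  # assumed; not shown in the diff

# messages is not shown in the diff; a single user turn is assumed for illustration
messages = [{"role": "user", "content": "Who are you?"}]

prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)

# max_model_len is now forwarded to the vLLM engine (the second change in this commit)
llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=max_model_len)

outputs = llm.generate(prompts, sampling_params)
print(outputs[0].outputs[0].text)

The usual motivation for setting max_model_len is to cap the context length vLLM reserves KV-cache memory for, so the model's long default context does not demand more GPU memory than a single-GPU deployment has available.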