ybelkada committed
Commit 3d7c55a
1 Parent(s): 5e1687c

Update README.md

Files changed (1):
  1. README.md +1 -1
README.md CHANGED
@@ -59,7 +59,7 @@ messages = [
 ]

 input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-input_ids = tokenizer(input_text, return_tensors="pt")
+input_ids = tokenizer(input_text, return_tensors="pt").input_ids

 outputs = model.generate(input_ids, max_new_tokens=30)
 print(tokenizer.decode(outputs[0]))
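The one-line change matters because `tokenizer(..., return_tensors="pt")` returns a `BatchEncoding`, not a tensor; appending `.input_ids` hands `model.generate()` the token-id tensor it expects as its first positional argument. Below is a minimal sketch of the corrected README snippet in context; the model id and the example message are illustrative assumptions, not part of this diff.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed checkpoint id for illustration; substitute the repo's actual model id.
model_id = "tiiuae/falcon-mamba-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Placeholder chat message; the README's real message content is not shown in the diff.
messages = [
    {"role": "user", "content": "Hello, how are you?"},
]

input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# tokenizer(...) returns a BatchEncoding; .input_ids extracts the tensor of token ids
# that model.generate() takes as its first positional argument.
input_ids = tokenizer(input_text, return_tensors="pt").input_ids

outputs = model.generate(input_ids, max_new_tokens=30)
print(tokenizer.decode(outputs[0]))
```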