limhyeonseok committed 897ebfe (1 parent: eb39122): Update README.md
README.md CHANGED
```diff
@@ -119,8 +119,9 @@ prompt = tokenizer.apply_chat_template(
 generation_kwargs = {
     "max_tokens":512,
     "stop":["<|eot_id|>"],
+    "top-p":0.9,
+    "temperature":0.6,
     "echo":True, # Echo the prompt in the output
-    "top_k":1 # This is essentially greedy decoding, since the model will always return the highest-probability token. Set this value > 1 for sampling decoding
 }

 resonse_msg = model(prompt, **generation_kwargs)
```
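The commit swaps the old deterministic `"top_k":1` setting for nucleus sampling: `top-p` at 0.9 with a temperature of 0.6, so the model samples among the most likely tokens instead of always taking the single highest-probability one. Below is a minimal runnable sketch of the post-commit snippet, assuming llama-cpp-python (the `<|eot_id|>` stop string and the `model(prompt, **generation_kwargs)` call pattern suggest a Llama 3 GGUF model loaded with `llama_cpp.Llama`); the model path and prompt are placeholders, since neither appears in this hunk. Two deviations from the diff as committed: llama-cpp-python's parameter is `top_p` (underscore), so the hyphenated `"top-p"` key would likely be rejected as an unexpected keyword, and `resonse_msg` is spelled out as `response_msg`.

```python
# A sketch under the assumptions above; the path and prompt are hypothetical.
from llama_cpp import Llama

model = Llama(model_path="model.gguf")  # hypothetical local GGUF path

# In the README, `prompt` is built earlier via tokenizer.apply_chat_template(...);
# a plain string stands in here.
prompt = "Hello, how are you?"

generation_kwargs = {
    "max_tokens": 512,       # cap on newly generated tokens
    "stop": ["<|eot_id|>"],  # halt at the Llama 3 end-of-turn token
    "top_p": 0.9,            # nucleus sampling: keep the top 90% probability mass
    "temperature": 0.6,      # < 1.0 sharpens the distribution toward likely tokens
    "echo": True,            # echo the prompt in the output
}

response_msg = model(prompt, **generation_kwargs)
print(response_msg["choices"][0]["text"])
```

Note that with `"echo":True` the returned text includes the prompt itself, so downstream code that wants only the completion should slice the prompt off.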