Paul Rock
committed on
Commit
•
568b8fe
1
Parent(s):
f30df32
Internal prompt sample added
Browse files- saiga_v2.json +10 -0
- test_lora.py +1 -1
saiga_v2.json
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"system_prompt": "Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им.",
|
3 |
+
"system_message_template": "<s>{role}\n{content}</s>\n",
|
4 |
+
"user_message_template": "<s>{role}\n{content}</s>\n",
|
5 |
+
"bot_message_template": "<s>{role}\n{content}</s>\n",
|
6 |
+
"user_role": "user",
|
7 |
+
"bot_role": "bot",
|
8 |
+
"system_role": "system",
|
9 |
+
"suffix": "<s>bot"
|
10 |
+
}
|
test_lora.py
CHANGED
@@ -105,7 +105,7 @@ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
|
|
105 |
generation_config = GenerationConfig.from_pretrained(MODEL_NAME)
|
106 |
print(generation_config)
|
107 |
|
108 |
-
template_path = '
|
109 |
conversation = Conversation()
|
110 |
while True:
|
111 |
user_message = input("User: ")
|
|
|
105 |
generation_config = GenerationConfig.from_pretrained(MODEL_NAME)
|
106 |
print(generation_config)
|
107 |
|
108 |
+
template_path = 'saiga_v2.json'
|
109 |
conversation = Conversation()
|
110 |
while True:
|
111 |
user_message = input("User: ")
|