fix(root): Replaces the system role with the user role to improve the generation experience.
- README.md (+0 -5)
- tokenizer_config.json (+2 -2)
README.md (CHANGED)

````diff
@@ -73,8 +73,6 @@ You can provide the prompt as a question with a generic template as follow:
 ```
 For example:
 ```markdown
-<|system|>
-You are a helpful AI assistant.<|end|>
 <|user|>
 How to explain Internet for a medieval knight?<|end|>
 <|assistant|>
@@ -83,8 +81,6 @@ How to explain Internet for a medieval knight?<|end|>
 where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following:
 
 ```markdown
-<|system|>
-You are a helpful AI assistant.<|end|>
 <|user|>
 I am going to Paris, what should I see?<|end|>
 <|assistant|>
@@ -113,7 +109,6 @@ model = AutoModelForCausalLM.from_pretrained(
 tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
 
 messages = [
-    {"role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user."},
     {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
     {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
     {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"},
````
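As a quick illustration (a sketch written for this note, not part of the commit), the updated system-free prompt can be produced from a `messages` list with `transformers`' `apply_chat_template`, assuming the checkpoint ships the user/assistant-only template shown in `tokenizer_config.json` below:

```python
# Illustrative sketch: render the updated prompt format via the chat template.
# Assumes the repo's tokenizer_config.json contains the new user/assistant-only template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

messages = [
    {"role": "user", "content": "How to explain Internet for a medieval knight?"},
]

# The new template already appends "<|assistant|>" after each user turn,
# so no add_generation_prompt flag is needed here.
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
print(prompt)
# Expected output (the template prepends the BOS token):
# <s><|user|>
# How to explain Internet for a medieval knight?<|end|>
# <|assistant|>
```

A few-shot prompt is built the same way by passing alternating user/assistant messages; each assistant turn is closed with `<|end|>` and the prompt is left open after the final `<|assistant|>`.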
tokenizer_config.json (CHANGED)

```diff
@@ -116,7 +116,7 @@
     }
   },
   "bos_token": "<s>",
-  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == '
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "legacy": false,
@@ -127,4 +127,4 @@
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": false
-}
+}
```
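To check the new template in isolation, a small sketch (again, not part of this commit) can render the Jinja string directly with `jinja2`; because the template only has `user` and `assistant` branches, any `system` message is silently skipped:

```python
# Illustrative check: render the new chat_template with jinja2 and confirm
# that "system" messages are dropped (the template has no branch for them).
from jinja2 import Template

chat_template = (
    "{{ bos_token }}{% for message in messages %}"
    "{% if (message['role'] == 'user') %}"
    "{{'<|user|>' + '\\n' + message['content'] + '<|end|>' + '\\n' + '<|assistant|>' + '\\n'}}"
    "{% elif (message['role'] == 'assistant') %}"
    "{{message['content'] + '<|end|>' + '\\n'}}"
    "{% endif %}{% endfor %}"
)

messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},  # no longer rendered
    {"role": "user", "content": "I am going to Paris, what should I see?"},
]

print(Template(chat_template).render(bos_token="<s>", messages=messages))
# Output (no system turn appears):
# <s><|user|>
# I am going to Paris, what should I see?<|end|>
# <|assistant|>
```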