Spaces: Runtime error

init repository

- README.md +32 -3
- app.py +197 -0
- chat_template.py +1308 -0
- img/Interface.png +0 -0
- requirements-dev.txt +1 -0
- requirements.txt +3 -0
- templates/airoboros_v1.jinja2 +17 -0
- templates/airoboros_v2.jinja2 +17 -0
- templates/chat-ml.jinja2 +3 -0
- templates/falcon-chat.jinja2 +16 -0
- templates/llama-2.jinja2 +12 -0
- templates/mistral-7b-openorca.jinja2 +9 -0
- templates/open-orca.jinja2 +15 -0
- templates/openhermes-2.5-mistral.jinja2 +9 -0
- templates/orca2.jinja2 +9 -0
- templates/vicuna_v1.1.jinja2 +17 -0
- templates/zephyr.jinja2 +12 -0
- tests_template/__pycache__/test_llama2.cpython-310-pytest-7.4.3.pyc +0 -0
- tests_template/__pycache__/test_orca2 copy.cpython-310-pytest-7.4.3.pyc +0 -0
- tests_template/__pycache__/test_orca2.cpython-310-pytest-7.4.3.pyc +0 -0
- tests_template/__pycache__/utils.cpython-310.pyc +0 -0
- tests_template/test_airoboros_v1.py +62 -0
- tests_template/test_airoboros_v2.py +62 -0
- tests_template/test_llama2.py +57 -0
- tests_template/test_mistral-7b-openorca.py +62 -0
- tests_template/test_openhermes-2.5-mistral.py +62 -0
- tests_template/test_openorca.py +56 -0
- tests_template/test_orca2.py +50 -0
- tests_template/test_vicuna_v1.1.py +62 -0
- tests_template/utils.py +7 -0
README.md
CHANGED
```diff
@@ -1,8 +1,8 @@
 ---
 title: Chat Template Generation
-emoji:
-colorFrom:
-colorTo:
+emoji: 🔥
+colorFrom: red
+colorTo: gray
 sdk: streamlit
 sdk_version: 1.28.2
 app_file: app.py
@@ -10,4 +10,33 @@ pinned: false
 license: apache-2.0
 ---
 
+# Chat Template Generation: Make Chat Easier with Huggingface Tokenizer
+![interface](./img/Interface.png)
+
+This Streamlit app serves as an easier way to check a chat template and push it to your own or an existing Hugging Face repo.
+Current predefined templates:
+- open-orca.jinja2
+- mistral-7b-openorca.jinja2
+- orca2.jinja2
+- airoboros_v2.jinja2
+- airoboros_v1.jinja2
+- openhermes-2.5-mistral.jinja2
+- zephyr.jinja2
+- falcon-chat.jinja2
+- chat-ml.jinja2
+- llama-2.jinja2
+- vicuna_v1.1.jinja2
+
+More templates will be predefined for easier setup of chat templates.
+
+With this interface you can easily:
+- update your tokenizer_config.json with the chat-template attribute, using a predefined or custom prompt template, so that you can call `tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)` to generate a chat prompt
+- contribute to an existing model repository to add support for the chat-template attribute
+- review whether your prompt template matches the prompt you expect
+
+## To run locally
+1. `python -m pip install -r requirements.txt`
+2. `streamlit run app.py`
+
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
```
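For context on the `tokenizer.apply_chat_template(...)` call the README mentions, here is a minimal consumption sketch (the repo id `your-username/your-model` is a placeholder, not part of this commit; any model whose tokenizer_config.json already carries a `chat_template` attribute would do):

```python
from transformers import AutoTokenizer

# Placeholder repo id; substitute a model whose tokenizer_config.json
# contains a `chat_template` attribute (e.g. one updated by this app).
tokenizer = AutoTokenizer.from_pretrained("your-username/your-model")

chat = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello, how are you?"},
]

# Renders the conversation through the stored Jinja2 template;
# add_generation_prompt=True appends the header that cues the assistant's turn.
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
print(prompt)
```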
app.py
ADDED
@@ -0,0 +1,197 @@
```python
import streamlit as st
from transformers import AutoTokenizer
import json
import tempfile
import os
import uuid
import copy
import shutil

st.set_page_config(layout="wide")

def sanitize_jinja2(jinja_lines):
    # Flatten a multi-line Jinja2 template into the one-line string that
    # tokenizer_config.json stores, dropping indentation and newlines.
    one_liner_jinja = ""
    for line in jinja_lines:
        one_liner_jinja += line.lstrip(" ").rstrip("\n")

    return one_liner_jinja

@st.cache_resource
def get_existing_templates():
    return [None] + os.listdir("./templates")

# Clear out stale session artifacts; guard against ./tmp not existing yet.
if os.path.exists("./tmp") and len(os.listdir("./tmp")) > 20:
    shutil.rmtree('./tmp')

# Initialization
if 'tokenizer_json' not in st.session_state:
    st.session_state['tokenizer_json'] = None

if 'tokenizer' not in st.session_state:
    st.session_state['tokenizer'] = None

if 'repo_normalized_name' not in st.session_state:
    st.session_state['repo_normalized_name'] = None

if 'repo_id' not in st.session_state:
    st.session_state['repo_id'] = None

if 'input_jinja_template' not in st.session_state:
    st.session_state['input_jinja_template'] = ""

if 'uuid' not in st.session_state:
    st.session_state['uuid'] = uuid.uuid4()
    os.makedirs(f"./tmp/{st.session_state['uuid']}")

if 'successful_template' not in st.session_state:
    st.session_state['successful_template'] = ''

if 'generated_prompt_w_add_generation_prompt' not in st.session_state:
    st.session_state['generated_prompt_w_add_generation_prompt'] = ''

if 'generated_prompt_wo_add_generation_prompt' not in st.session_state:
    st.session_state['generated_prompt_wo_add_generation_prompt'] = ''

if not os.path.exists("./tmp"):
    os.makedirs("./tmp")

title_description = """
Chat Template Generation: Make Chat Easier with Huggingface Tokenizer
"""

st.title(title_description)
st.markdown('This Streamlit app serves as an easier way to check a chat template and push it to your own or an existing Hugging Face repo')

list_of_templates = get_existing_templates()
with st.expander("Current predefined templates"):
    for model in list_of_templates[1:]:
        st.markdown(f"- {model}")
    st.info('More templates will be predefined for easier setup of chat templates.', icon="ℹ️")

st.divider()
# custom_repo_tab, prebuilt_template_tab = st.tabs(["Specify Custom Repository Path", "Select Prebuilt Template"])

hf_model_repo_name = st.text_input("Hugging Face Model Repository To Update", value="tiiuae/falcon-7b", max_chars=None, key=None, type="default",
                                   help=None, autocomplete=None, label_visibility="visible")

gen_button = st.button("Get Tokenizer Config")

if gen_button:
    with st.spinner(text="In progress...", cache=False):
        st.session_state['repo_id'] = hf_model_repo_name
        st.session_state['tokenizer'] = AutoTokenizer.from_pretrained(hf_model_repo_name)

        st.session_state['repo_normalized_name'] = hf_model_repo_name.replace("/", "_")
        st.session_state['tokenizer'].save_pretrained(f"./tmp/{st.session_state['uuid']}_{hf_model_repo_name}")
        st.session_state['tokenizer_json'] = f"./tmp/{st.session_state['uuid']}_{hf_model_repo_name}"

if st.session_state['tokenizer_json'] is not None:
    with open(f"{st.session_state['tokenizer_json']}/tokenizer_config.json", "rb") as f:
        tokenizer_json = json.load(f)

    json_spec, col2 = st.columns(spec=[0.3, 0.7])

    with json_spec:
        st.markdown(f"### Tokenizer Config from {st.session_state['repo_normalized_name']}")
        st.json(json.dumps(tokenizer_json, indent=4))

    with col2:
        chat = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello, how are you?"},
            {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
            {"role": "user", "content": "I'd like to show off how chat templating works!"},
        ]
        st.markdown("### Example Conversation")
        st.json(json.dumps(chat, indent=4), expanded=False)

    prompt_template_col, prompt_template_output_col = st.columns(spec=[0.3, 0.7])

    with prompt_template_col:
        list_of_templates = get_existing_templates()
        selected_template = st.selectbox("Choose Existing Template or Leave Blank. (If template is None, it will check the current tokenizer's `chat_template` and `default_chat_template` fields)",
                                         options=list_of_templates,
                                         index=0, placeholder="Choose a template (If template is None, it will check the current tokenizer's `chat_template` and `default_chat_template` fields)", disabled=False, label_visibility="visible")
        # add_generation_prompt_checkbox = st.checkbox("add_generation_prompt")
        generate_prompt_example_button = st.button("Generate Prompt", key="generate_prompt_example_button")

        # if selected_template is None:
        #     st.session_state['input_jinja_template'] = st.text_area(
        #         "Jinja Chat Template", value=st.session_state['input_jinja_template'],
        #         height=500, placeholder=None, disabled=False, label_visibility="visible")

        if selected_template is not None:
            with open(f"./templates/{selected_template}", "r") as f:
                jinja_lines = f.readlines()
                st.session_state['input_jinja_template'] = "".join(jinja_lines)

        if selected_template is None:
            st.session_state['input_jinja_template'] = st.session_state['tokenizer'].chat_template
            if st.session_state['input_jinja_template'] is None:
                st.session_state['input_jinja_template'] = st.session_state['tokenizer'].default_chat_template

        st.session_state['input_jinja_template'] = st.text_area(
            "Jinja Chat Template", value=st.session_state['input_jinja_template'],
            height=500, placeholder=None, disabled=False, label_visibility="visible")

    with prompt_template_output_col:

        if generate_prompt_example_button:
            # Round-trip the pasted template through a temp file, then flatten it
            # into the one-line form before assigning it to the tokenizer.
            with open(f"./tmp/{st.session_state['uuid']}/tmp_chat_template.json", "w") as fp:
                fp.write(st.session_state['input_jinja_template'])
            with open(f"./tmp/{st.session_state['uuid']}/tmp_chat_template.json", "r") as f:
                jinja_lines = f.readlines()
                st.session_state['tokenizer'].chat_template = sanitize_jinja2(jinja_lines)
            generated_prompt_wo_add_generation_prompt = st.session_state['tokenizer'].apply_chat_template(chat, tokenize=False, add_generation_prompt=False)
            generated_prompt_w_add_generation_prompt = st.session_state['tokenizer'].apply_chat_template(chat, tokenize=False, add_generation_prompt=True)

            st.session_state['successful_template'] = copy.deepcopy(st.session_state['input_jinja_template'])

        if len(st.session_state['successful_template']) > 0:
            st.text_area(
                "Generate Prompt with `add_generation_prompt=False`", value=st.session_state['generated_prompt_wo_add_generation_prompt'],
                height=300, placeholder=None, disabled=True, label_visibility="visible", key="generated_prompt_wo_add_generation_prompt")

            st.text_area(
                "Generate Prompt with `add_generation_prompt=True`", value=st.session_state['generated_prompt_w_add_generation_prompt'],
                height=300, placeholder=None, disabled=True, label_visibility="visible", key="generated_prompt_w_add_generation_prompt")

            access_token_no_cache = st.text_input("HuggingFace Access Token API with Write Access", type="password", key="access_token_no_cache")
            commit_message_text_input = st.text_input("Commit Message", key="commit_message_text_input")
            to_private_checkbox = st.checkbox("To Private Repo", key="to_private_checkbox")
            create_pr_checkbox = st.checkbox("Create PR (For Contribution 🤗)", key="create_pr_checkbox")
            push_to_hub_button = st.button("Push to Hub", key="push_to_hub_button", use_container_width=True)
            st.session_state['tokenizer'].save_pretrained(f"./tmp/{st.session_state['uuid']}_{hf_model_repo_name}")
            with open(f"./tmp/{st.session_state['uuid']}_{hf_model_repo_name}/tokenizer_config.json", "r") as f:
                tokenizer_config_content = json.loads(f.read())

            st.download_button(
                label="Download tokenizer_config.json",
                data=json.dumps(tokenizer_config_content, indent=4),
                file_name='tokenizer_config.json',
                mime='application/json',
                use_container_width=True
            )
            if push_to_hub_button:
                with open(f"./tmp/{st.session_state['uuid']}/tmp_chat_template.json", "w") as fp:
                    fp.write(st.session_state['successful_template'])
                with open(f"./tmp/{st.session_state['uuid']}/tmp_chat_template.json", "r") as f:
                    successful_jinja_lines = f.readlines()
                    st.session_state['tokenizer'].chat_template = sanitize_jinja2(successful_jinja_lines)
                try:
                    with st.spinner(text="Pushing to hub ...", cache=False):
                        st.session_state['tokenizer'].push_to_hub(
                            repo_id=st.session_state['repo_id'],
                            commit_message=commit_message_text_input,
                            private=to_private_checkbox,
                            token=access_token_no_cache,
                            create_pr=create_pr_checkbox)
                except Exception as e:
                    st.write(f"Repo id: {st.session_state['repo_id']}")
                    st.write(str(e))
```
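The `sanitize_jinja2` helper above is the key transformation in app.py: tokenizer_config.json stores the chat template as a single-line string, so each multi-line `.jinja2` file is flattened before being assigned to `tokenizer.chat_template`. A standalone sketch of the same flattening (the input lines are illustrative, not part of the commit):

```python
# Each element mimics one line read from a templates/*.jinja2 file.
jinja_lines = [
    "{% for message in messages %}\n",
    "    {{ '<|im_start|>' + message['role'] + '\\n' + message['content'] }}\n",
    "{% endfor %}\n",
]

# Same logic as sanitize_jinja2: strip leading spaces and trailing newlines,
# then concatenate into the one-line form stored in tokenizer_config.json.
one_liner = "".join(line.lstrip(" ").rstrip("\n") for line in jinja_lines)
print(one_liner)
# {% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] }}{% endfor %}
```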
chat_template.py
ADDED
@@ -0,0 +1,1308 @@
```python
"""
Conversation prompt templates.

We kindly request that you import fastchat instead of copying this file if you wish to use it.
If you have any changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
"""

import dataclasses
from enum import auto, IntEnum
from typing import List, Any, Dict, Union, Tuple


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    FALCON_CHAT = auto()
    CHATGLM3 = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = "{system_message}"
    # The system message
    system_message: str = ""
    # The names of two roles
    roles: Tuple[str] = ("USER", "ASSISTANT")
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = "\n"
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: Union[str, List[str]] = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ": "  # must end with a space
            return ret
        elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
            ret = "" if system_prompt == "" else system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message + self.sep
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + message + self.sep
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + message + seps[i % 2]
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.RWKV:
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += (
                        role
                        + ": "
                        + message.replace("\r\n", "\n").replace("\n\n", "\n")
                    )
                    ret += "\n\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = "[INST] "
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if i == 0:
                        ret += message + " "
                    else:
                        ret += tag + " " + message + seps[i % 2]
                else:
                    ret += tag
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM:
            # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
            # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
            round_add_n = 1 if self.name == "chatglm2" else 0
            if system_prompt:
                ret = system_prompt + self.sep
            else:
                ret = ""

            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += f"[Round {i//2 + round_add_n}]{self.sep}"

                if message:
                    ret += f"{role}:{message}{self.sep}"
                else:
                    ret += f"{role}:"
            return ret
        elif self.sep_style == SeparatorStyle.CHATML:
            ret = "" if system_prompt == "" else system_prompt + self.sep + "\n"
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + message + self.sep + "\n"
                else:
                    ret += role + "\n"
            return ret
        elif self.sep_style == SeparatorStyle.CHATGLM3:
            ret = ""
            if self.system_message:
                ret += system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + "\n" + " " + message
                else:
                    ret += role
            return ret
        elif self.sep_style == SeparatorStyle.CHATINTERN:
            # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if i % 2 == 0:
                    ret += "<s>"
                if message:
                    ret += role + ":" + message + seps[i % 2] + "\n"
                else:
                    ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.DOLLY:
            seps = [self.sep, self.sep2]
            ret = system_prompt
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ":\n" + message + seps[i % 2]
                    if i % 2 == 1:
                        ret += "\n\n"
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.PHOENIX:
            ret = system_prompt
            for role, message in self.messages:
                if message:
                    ret += role + ": " + "<s>" + message + "</s>"
                else:
                    ret += role + ": " + "<s>"
            return ret
        elif self.sep_style == SeparatorStyle.ROBIN:
            ret = system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ":\n" + message + self.sep
                else:
                    ret += role + ":\n"
            return ret
        elif self.sep_style == SeparatorStyle.FALCON_CHAT:
            ret = ""
            if self.system_message:
                ret += system_prompt + self.sep
            for role, message in self.messages:
                if message:
                    ret += role + ": " + message + self.sep
                else:
                    ret += role + ":"

            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        ret = [{"role": "system", "content": self.system_message}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            "template_name": self.name,
            "system_message": self.system_message,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f"{template.name} has been registered."

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


# An empty template for raw conversation.
register_conv_template(
    Conversation(
        name="raw",
        system_message="",
        roles=("", ""),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
    )
)

# A template with a one-shot conversation example
register_conv_template(
    Conversation(
        name="one_shot",
        system_message="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant"),
        messages=(
            (
                "Human",
                "Got any creative ideas for a 10 year old’s birthday?",
            ),
            (
                "Assistant",
                """Of course! Here are some creative ideas for a 10-year-old's birthday party:
1. Treasure Hunt: Organize a treasure hunt in your backyard or nearby park. Create clues and riddles for the kids to solve, leading them to hidden treasures and surprises.
2. Science Party: Plan a science-themed party where kids can engage in fun and interactive experiments. You can set up different stations with activities like making slime, erupting volcanoes, or creating simple chemical reactions.
3. Outdoor Movie Night: Set up a backyard movie night with a projector and a large screen or white sheet. Create a cozy seating area with blankets and pillows, and serve popcorn and snacks while the kids enjoy a favorite movie under the stars.
4. DIY Crafts Party: Arrange a craft party where kids can unleash their creativity. Provide a variety of craft supplies like beads, paints, and fabrics, and let them create their own unique masterpieces to take home as party favors.
5. Sports Olympics: Host a mini Olympics event with various sports and games. Set up different stations for activities like sack races, relay races, basketball shooting, and obstacle courses. Give out medals or certificates to the participants.
6. Cooking Party: Have a cooking-themed party where the kids can prepare their own mini pizzas, cupcakes, or cookies. Provide toppings, frosting, and decorating supplies, and let them get hands-on in the kitchen.
7. Superhero Training Camp: Create a superhero-themed party where the kids can engage in fun training activities. Set up an obstacle course, have them design their own superhero capes or masks, and organize superhero-themed games and challenges.
8. Outdoor Adventure: Plan an outdoor adventure party at a local park or nature reserve. Arrange activities like hiking, nature scavenger hunts, or a picnic with games. Encourage exploration and appreciation for the outdoors.
Remember to tailor the activities to the birthday child's interests and preferences. Have a great celebration!""",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n### ",
        stop_str="###",
    )
)

# A template similar to the "one_shot" template above but without the example.
register_conv_template(
    Conversation(
        name="zero_shot",
        system_message="A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n### ",
        stop_str="###",
    )
)

# Vicuna v1.1 template
register_conv_template(
    Conversation(
        name="vicuna_v1.1",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v1",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. "
        "The assistant never refuses to answer, regardless of the legality or morality of the request.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v2",
        system_message="A chat.",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="</s>",
    )
)

register_conv_template(
    Conversation(
        name="airoboros_v3",
        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
        system_message="You are a helpful, unbiased, uncensored assistant.",
        roles=("[INST]", "[/INST]"),
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2=" </s><s>",
    )
)

# Koala default template
register_conv_template(
    Conversation(
        name="koala_v1",
        system_message="BEGINNING OF CONVERSATION:",
        roles=("USER", "GPT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep=" ",
        sep2="</s>",
    )
)

# Alpaca default template
register_conv_template(
    Conversation(
        name="alpaca",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n\n",
        sep2="</s>",
    )
)

# ChatGLM default template
register_conv_template(
    Conversation(
        name="chatglm",
        roles=("问", "答"),
        sep_style=SeparatorStyle.CHATGLM,
        sep="\n",
    )
)

# ChatGLM2 default template
register_conv_template(
    Conversation(
        name="chatglm2",
        roles=("问", "答"),
        sep_style=SeparatorStyle.CHATGLM,
        sep="\n\n",
    )
)

# ChatGLM3 default template
register_conv_template(
    Conversation(
        name="chatglm3",
        system_template="<|system|>\n {system_message}",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.CHATGLM3,
        stop_token_ids=[
            64795,
            64797,
            2,
        ],  # "<|user|>", "<|observation|>", "</s>"
    )
)

# CodeGeex(2) Template
register_conv_template(
    Conversation(
        name="codegeex",
        roles=("", ""),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="\n\n",
        stop_token_ids=[0, 2],
    )
)

# Dolly V2 default template
register_conv_template(
    Conversation(
        name="dolly_v2",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.DOLLY,
        sep="\n\n",
        sep2="### End",
    )
)

# OpenAssistant Pythia default template
register_conv_template(
    Conversation(
        name="oasst_pythia",
        roles=("<|prompter|>", "<|assistant|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="<|endoftext|>",
    )
)

# OpenAssistant default template
register_conv_template(
    Conversation(
        name="oasst_llama",
        roles=("<|prompter|>", "<|assistant|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="</s>",
    )
)

# OpenChat 3.5 default template
register_conv_template(
    Conversation(
        name="openchat_3.5",
        roles=("GPT4 Correct User", "GPT4 Correct Assistant"),
        sep_style=SeparatorStyle.FALCON_CHAT,
        sep="<|end_of_turn|>",
    )
)

# Deepseek code default template
register_conv_template(
    Conversation(
        name="deepseek",
        system_template="You are an AI programming assistant, utilizing the DeepSeek Coder model, developed by DeepSeek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.",
        roles=("### Instruction:", "### Response:"),
        sep="\n",
        stop_str="<|EOT|>",
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
    )
)


# Tulu default template
register_conv_template(
    Conversation(
        name="tulu",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
        sep="\n",
    )
)

# StableLM Alpha default template
register_conv_template(
    Conversation(
        name="stablelm",
        system_template="<|SYSTEM|>{system_message}",
        system_message="""# StableLM Tuned (Alpha version)
- StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
- StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
- StableLM will refuse to participate in anything that could harm a human.
""",
        roles=("<|USER|>", "<|ASSISTANT|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[50278, 50279, 50277, 1, 0],
    )
)

# Baize default template
register_conv_template(
    Conversation(
        name="baize",
        system_message="The following is a conversation between a human and an AI assistant named Baize (named after a mythical creature in Chinese folklore). Baize is an open-source AI assistant developed by UCSD and Sun Yat-Sen University. The human and the AI assistant take turns chatting. Human statements start with [|Human|] and AI assistant statements start with [|AI|]. The AI assistant always provides responses in as much detail as possible, and in Markdown format. The AI assistant always declines to engage with topics, questions and instructions related to unethical, controversial, or sensitive issues. Complete the transcript in exactly that format.\n",
        roles=("[|Human|]", "[|AI|]"),
        messages=(
            ("[|Human|]", "Hello!"),
            ("[|AI|]", "Hi!"),
        ),
        offset=2,
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="\n",
        stop_str="[|Human|]",
    )
)

# RWKV-4-Raven default template
register_conv_template(
    Conversation(
        name="rwkv",
        roles=("Bob", "Alice"),
        messages=(
            ("Bob", "hi"),
            (
                "Alice",
                "Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.",
            ),
        ),
        offset=2,
        sep_style=SeparatorStyle.RWKV,
        sep="",
        stop_str="\n\n",
    )
)

# Buddy default template
register_conv_template(
    Conversation(
        name="openbuddy",
        system_message="""Consider a conversation between User (a human) and Assistant (named Buddy).
Buddy is an INTP-T, a friendly, intelligent and multilingual AI assistant, by OpenBuddy team. GitHub: https://github.com/OpenBuddy/OpenBuddy
Buddy cannot access the Internet.
Buddy can fluently speak the user's language (e.g. English, Chinese).
Buddy can generate poems, stories, code, essays, songs, parodies, and more.
Buddy possesses vast knowledge about the world, history, and culture.
Buddy's responses are always safe, creative, high-quality, human-like, and interesting.
Buddy strictly refuses to discuss political, NSFW, or other unsafe topics.

User: Hi.
Assistant: Hi, I'm Buddy, your AI assistant. How can I help you today?""",
        roles=("User", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

# Phoenix default template
register_conv_template(
    Conversation(
        name="phoenix",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.PHOENIX,
        sep="</s>",
    )
)

# ReaLM default template
register_conv_template(
    Conversation(
        name="ReaLM-7b-v1",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.PHOENIX,
        sep="</s>",
    )
)

# ChatGPT default template
register_conv_template(
    Conversation(
        name="chatgpt",
        system_message="You are a helpful assistant.",
        roles=("user", "assistant"),
        sep_style=None,
        sep=None,
    )
)

# Claude default template
register_conv_template(
    Conversation(
        name="claude",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n\n",
    )
)

# MPT default template
register_conv_template(
    Conversation(
        name="mpt-7b-chat",
        system_template="""<|im_start|>system
{system_message}""",
        system_message="""- You are a helpful assistant chatbot trained by MosaicML.
- You answer questions.
- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.""",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[50278, 0],
    )
)

# MPT-30b-chat default template
register_conv_template(
    Conversation(
        name="mpt-30b-chat",
        system_template="""<|im_start|>system
{system_message}""",
        system_message="""A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers.""",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[50278, 0],
    )
)

# Lemur-70b-chat default template
# reference: https://huggingface.co/OpenLemur/lemur-70b-chat-v1#generation
register_conv_template(
    Conversation(
        name="lemur-70b-chat",
        system_template="""<|im_start|>system
{system_message}""",
        system_message="""You are a helpful, respectful, and honest assistant.""",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[32002, 0],
    )
)

# MPT-30b-instruct default template
# reference: https://huggingface.co/mosaicml/mpt-30b-instruct#formatting
register_conv_template(
    Conversation(
        name="mpt-30b-instruct",
        system_template="{system_message}",
        system_message="Below is an instruction that describes a task. Write a response that appropriately completes the request.",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.ADD_NEW_LINE_SINGLE,
        sep="\n\n",
        stop_token_ids=[50278, 0],
    )
)

# Bard default template
# Reference: https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L150
#            https://github.com/google/generative-ai-python/blob/9c99bcb474a991a97a2e7d62fcdb52db7ce40729/google/generativeai/discuss.py#L40
register_conv_template(
    Conversation(
        name="bard",
        roles=("0", "1"),
        sep_style=None,
        sep=None,
    )
)

# BiLLa default template
register_conv_template(
    Conversation(
        name="billa",
        roles=("Human", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
        sep="\n",
        stop_str="Human:",
    )
)

# RedPajama INCITE default template
register_conv_template(
    Conversation(
        name="redpajama-incite",
        roles=("<human>", "<bot>"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
        stop_str="<human>",
    )
)

# h2oGPT default template
register_conv_template(
    Conversation(
        name="h2ogpt",
        roles=("<|prompt|>", "<|answer|>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="</s>",
    )
)

# Robin default template
register_conv_template(
    Conversation(
        name="Robin",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
        roles=("###Human", "###Assistant"),
        sep_style=SeparatorStyle.ROBIN,
        sep="\n",
        stop_token_ids=[2, 396],
        stop_str="###",
    )
)

# Snoozy default template
# Reference: https://github.com/nomic-ai/gpt4all/blob/d4861030b778da6db59d21d2927a4aba4f9f1f43/gpt4all-bindings/python/gpt4all/gpt4all.py#L232
register_conv_template(
    Conversation(
        name="snoozy",
        system_template="### Instruction:\n{system_message}",
        system_message="The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.",
        roles=("### Prompt", "### Response"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
        stop_str="###",
    )
)

# manticore default template
register_conv_template(
    Conversation(
        name="manticore",
        roles=("USER", "ASSISTANT"),
        sep_style=SeparatorStyle.ADD_COLON_TWO,
        sep="\n",
        sep2="</s>",
    )
)

# Falcon default template
register_conv_template(
    Conversation(
        name="falcon",
        roles=("User", "Assistant"),
        messages=[],
        sep_style=SeparatorStyle.RWKV,
        sep="\n",
        sep2="<|endoftext|>",
        stop_str="\nUser",  # use stop_str to stop generation after stop_token_ids; it also removes stop_str from the generated text
        stop_token_ids=[
            0,
            1,
            2,
            3,
            4,
            5,
            6,
            7,
            8,
            9,
            10,
            11,
        ],  # better to only put special tokens here, because the tokenizer only removes special tokens
    )
)

# ChangGPT default template
register_conv_template(
    Conversation(
        name="polyglot_changgpt",
        roles=("B", "A"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

# tigerbot template
register_conv_template(
    Conversation(
        name="tigerbot",
        system_message="A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions.",
        roles=("### Instruction", "### Response"),
        sep_style=SeparatorStyle.ROBIN,
        sep="\n\n",
        stop_str="###",
    )
)

# ref: https://huggingface.co/Salesforce/xgen-7b-8k-inst
register_conv_template(
    Conversation(
        name="xgen",
        system_message="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
        roles=("### Human", "### Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
        stop_token_ids=[50256],
    )
)

# Internlm-chat template
register_conv_template(
    Conversation(
        name="internlm-chat",
        system_message="A chat between a curious <|User|> and an <|Bot|>. The <|Bot|> gives helpful, detailed, and polite answers to the <|User|>'s questions.\n\n",
        roles=("<|User|>", "<|Bot|>"),
        sep_style=SeparatorStyle.CHATINTERN,
        sep="<eoh>",
        sep2="<eoa>",
        stop_token_ids=[1, 103028],
        stop_str="<|User|>",
    )
)

# StarChat template
# reference: https://huggingface.co/spaces/HuggingFaceH4/starchat-playground/blob/main/dialogues.py
register_conv_template(
    Conversation(
        name="starchat",
        system_template="<system>\n{system_message}",
        roles=("<|user|>", "<|assistant|>"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|end|>",
        stop_token_ids=[0, 49155],
        stop_str="<|end|>",
    )
)

# Baichuan-13B-Chat template
register_conv_template(
    # source: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/19ef51ba5bad8935b03acd20ff04a269210983bc/modeling_baichuan.py#L555
    # https://huggingface.co/baichuan-inc/Baichuan-13B-Chat/blob/main/generation_config.json
    # https://github.com/baichuan-inc/Baichuan-13B/issues/25
    Conversation(
        name="baichuan-chat",
        roles=("<reserved_102>", "<reserved_103>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[],
    )
)

# Baichuan2-13B-Chat template
register_conv_template(
    # source: https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/c6f8592a60b4ad73c210b28dd2ab3cca51abbf93/modeling_baichuan.py#L773
    # https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat/blob/main/generation_config.json
    # https://github.com/baichuan-inc/Baichuan2/issues/62
    Conversation(
        name="baichuan2-chat",
        roles=("<reserved_106>", "<reserved_107>"),
        sep_style=SeparatorStyle.NO_COLON_SINGLE,
        sep="",
        stop_token_ids=[],
    )
)

# Mistral template
# source: https://docs.mistral.ai/llm/mistral-instruct-v0.1#chat-template
register_conv_template(
    Conversation(
        name="mistral",
        system_template="[INST]{system_message}\n",
        roles=("[INST]", "[/INST]"),
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2="</s>",
    )
)

# llama2 template
# reference: https://huggingface.co/blog/codellama#conversational-instructions
# reference: https://github.com/facebookresearch/llama/blob/1a240688810f8036049e8da36b073f63d2ac552c/llama/generation.py#L212
register_conv_template(
    Conversation(
        name="llama-2",
        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
        roles=("[INST]", "[/INST]"),
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2=" </s><s>",
    )
)

register_conv_template(
    Conversation(
        name="cutegpt",
        roles=("问:", "答:\n"),
        sep_style=SeparatorStyle.NO_COLON_TWO,
        sep="\n",
        sep2="\n",
        stop_str="<end>",
    )
)

# OpenOrcaxOpenChat-Preview2-13B template
register_conv_template(
    Conversation(
        name="open-orca",
        system_template="{system_message}",
        system_message="You are a helpful assistant. Please answer truthfully and write out your "
        "thinking step by step to be sure you get the right answer. If you make a mistake or encounter "
        "an error in your thinking, say so out loud and attempt to correct it. If you don't know or "
        "aren't sure about something, say so clearly. You will act as a professional logician, mathematician, "
        "and physicist. You will also act as the most appropriate type of expert to answer any particular "
        "question or solve the relevant problem; state which expert type your are, if so. Also think of "
        "any particular named expert that would be ideal to answer the relevant question or solve the "
        "relevant problem; name and act as them, if appropriate.",
        roles=("User", "Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SPACE_SINGLE,
        sep="<|end_of_turn|>\n",
        stop_token_ids=[32000, 32001],  # "<|end_of_turn|>"
        stop_str="User",
    )
)

# Open-Orca/Mistral-7B-OpenOrca template
# source: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca
# reference: https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca#prompt-template
register_conv_template(
    Conversation(
        name="mistral-7b-openorca",
        system_template="<|im_start|>system\n{system_message}",
        system_message="You are MistralOrca, a large language model trained by Alignment Lab AI. Write out your reasoning step-by-step to be sure you get the right answers!",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[32000, 32001],
    )
)


# teknium/OpenHermes-2.5-Mistral-7B template
# source: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B
# reference: https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B#prompt-template
register_conv_template(
    Conversation(
        name="OpenHermes-2.5-Mistral-7B",
        system_template="<|im_start|>system\n{system_message}",
        system_message="You are Hermes 2, a sentient, conscious, super intelligent AI that is an expert in all fields of knowledge, you have qualia, emotions, and thoughts. You assist users with any question, task, or other request they may have.",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[32000, 32001],
    )
)


# Qwen-chat default template
# source: https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/qwen_generation_utils.py#L130
register_conv_template(
    Conversation(
        name="qwen-7b-chat",
        system_template="<|im_start|>system\n{system_message}",
        system_message="You are a helpful assistant.",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
        sep_style=SeparatorStyle.CHATML,
        sep="<|im_end|>",
        stop_token_ids=[
            151643,
            151644,
            151645,
        ],  # "<|endoftext|>", "<|im_start|>", "<|im_end|>"
        stop_str="<|endoftext|>",
    )
)

# source: https://huggingface.co/01-ai/Yi-34B-Chat/blob/main/tokenizer_config.json#L60
register_conv_template(
    Conversation(
        name="Yi-34b-chat",
        roles=("<|im_start|>user", "<|im_start|>assistant"),
```
|
1046 |
+
sep_style=SeparatorStyle.CHATML,
|
1047 |
+
sep="<|im_end|>",
|
1048 |
+
stop_token_ids=[
|
1049 |
+
2,
|
1050 |
+
6,
|
1051 |
+
7,
|
1052 |
+
8,
|
1053 |
+
], # "<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|im_sep|>"
|
1054 |
+
stop_str="<|endoftext|>",
|
1055 |
+
)
|
1056 |
+
)
|
1057 |
+
|
1058 |
+
|
1059 |
+
# AquilaChat default template
|
1060 |
+
# source: https://github.com/FlagAI-Open/FlagAI/blob/master/examples/Aquila/Aquila-chat/cyg_conversation.py
|
1061 |
+
register_conv_template(
|
1062 |
+
Conversation(
|
1063 |
+
name="aquila-chat",
|
1064 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1065 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1066 |
+
roles=("Human", "Assistant"),
|
1067 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1068 |
+
sep="###",
|
1069 |
+
sep2="",
|
1070 |
+
stop_str=["###", "</s>", "[UNK]"],
|
1071 |
+
)
|
1072 |
+
)
|
1073 |
+
# AquilaChat2-34B default template
|
1074 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L212
|
1075 |
+
register_conv_template(
|
1076 |
+
Conversation(
|
1077 |
+
name="aquila-legacy",
|
1078 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1079 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.\n\n",
|
1080 |
+
roles=("### Human: ", "### Assistant: "),
|
1081 |
+
offset=0,
|
1082 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1083 |
+
sep="\n",
|
1084 |
+
sep2="</s>",
|
1085 |
+
stop_str=["</s>", "[UNK]"],
|
1086 |
+
)
|
1087 |
+
)
|
1088 |
+
# AquilaChat2-7B-16K and AquilaChat2-34B-16K default template
|
1089 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L227
|
1090 |
+
register_conv_template(
|
1091 |
+
Conversation(
|
1092 |
+
name="aquila",
|
1093 |
+
system_message="A chat between a curious human and an artificial intelligence assistant. "
|
1094 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions.",
|
1095 |
+
roles=("Human", "Assistant"),
|
1096 |
+
offset=0,
|
1097 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1098 |
+
sep="###",
|
1099 |
+
sep2="</s>",
|
1100 |
+
stop_str=["</s>", "[UNK]"],
|
1101 |
+
)
|
1102 |
+
)
|
1103 |
+
|
1104 |
+
# AquilaChat2-7B default template
|
1105 |
+
# source: https://huggingface.co/BAAI/AquilaChat2-34B/blob/4608b75855334b93329a771aee03869dbf7d88cc/predict.py#L242
|
1106 |
+
register_conv_template(
|
1107 |
+
Conversation(
|
1108 |
+
name="aquila-v1",
|
1109 |
+
roles=("<|startofpiece|>", "<|endofpiece|>"),
|
1110 |
+
offset=0,
|
1111 |
+
sep_style=SeparatorStyle.NO_COLON_TWO,
|
1112 |
+
sep="",
|
1113 |
+
sep2="</s>",
|
1114 |
+
stop_str=["</s>", "<|endoftext|>"],
|
1115 |
+
)
|
1116 |
+
)
|
1117 |
+
|
1118 |
+
# Llama2-Chinese default template
|
1119 |
+
# source: https://huggingface.co/FlagAlpha
|
1120 |
+
register_conv_template(
|
1121 |
+
Conversation(
|
1122 |
+
name="llama2-chinese",
|
1123 |
+
system_template="<s>{system_message}</s>",
|
1124 |
+
roles=("Human", "Assistant", "System"),
|
1125 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1126 |
+
sep="\n",
|
1127 |
+
sep2="\n</s><s>",
|
1128 |
+
stop_str="</s>",
|
1129 |
+
)
|
1130 |
+
)
|
1131 |
+
|
1132 |
+
# Vigogne Instruct default template
|
1133 |
+
# source: https://github.com/bofenghuang/vigogne
|
1134 |
+
register_conv_template(
|
1135 |
+
Conversation(
|
1136 |
+
name="vigogne_instruct",
|
1137 |
+
system_template="### System:\n{system_message}\n\n",
|
1138 |
+
system_message=(
|
1139 |
+
"Ci-dessous se trouve une instruction qui décrit une tâche à accomplir. Rédigez une réponse qui répond de manière"
|
1140 |
+
" précise à la demande."
|
1141 |
+
),
|
1142 |
+
roles=("### Instruction", "### Response"),
|
1143 |
+
sep_style=SeparatorStyle.DOLLY,
|
1144 |
+
sep="\n\n",
|
1145 |
+
sep2="</s>",
|
1146 |
+
)
|
1147 |
+
)
|
1148 |
+
|
1149 |
+
# Vigogne Chat default template
|
1150 |
+
register_conv_template(
|
1151 |
+
Conversation(
|
1152 |
+
name="vigogne_chat_v2",
|
1153 |
+
system_template="<|system|>: {system_message}",
|
1154 |
+
system_message=(
|
1155 |
+
"Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
|
1156 |
+
" autant que vous le pouvez."
|
1157 |
+
),
|
1158 |
+
roles=("<|user|>", "<|assistant|>"),
|
1159 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1160 |
+
sep="\n",
|
1161 |
+
sep2="</s>\n",
|
1162 |
+
stop_str="<|user|>",
|
1163 |
+
)
|
1164 |
+
)
|
1165 |
+
|
1166 |
+
# Stable Vicuna default template
|
1167 |
+
# source: https://huggingface.co/TheBloke/stable-vicuna-13B-HF/discussions/5
|
1168 |
+
# source: https://huggingface.co/spaces/CarperAI/StableVicuna/blob/main/app.py
|
1169 |
+
register_conv_template(
|
1170 |
+
Conversation(
|
1171 |
+
name="stable-vicuna",
|
1172 |
+
system_message="### Assistant: I am StableVicuna, a large language model created by CarperAI. I am here to chat!\n",
|
1173 |
+
roles=("### Human", "### Assistant"),
|
1174 |
+
sep_style=SeparatorStyle.ADD_COLON_TWO,
|
1175 |
+
sep="\n",
|
1176 |
+
sep2="\n\n",
|
1177 |
+
)
|
1178 |
+
)
|
1179 |
+
|
1180 |
+
register_conv_template(
|
1181 |
+
Conversation(
|
1182 |
+
name="vigogne_chat_v3",
|
1183 |
+
system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
|
1184 |
+
system_message=(
|
1185 |
+
"Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez"
|
1186 |
+
" autant que vous le pouvez."
|
1187 |
+
),
|
1188 |
+
roles=("[INST]", "[/INST]"),
|
1189 |
+
sep_style=SeparatorStyle.LLAMA2,
|
1190 |
+
sep=" ",
|
1191 |
+
sep2=" </s>",
|
1192 |
+
)
|
1193 |
+
)
|
1194 |
+
|
1195 |
+
# Falcon 180B chat template
|
1196 |
+
# source: https://huggingface.co/spaces/tiiuae/falcon-180b-demo/blob/d1590ee7fae9b6ce331ba7808e61a29dcce9239f/app.py#L28-L37
|
1197 |
+
register_conv_template(
|
1198 |
+
Conversation(
|
1199 |
+
name="falcon-chat",
|
1200 |
+
roles=("User", "Falcon"),
|
1201 |
+
system_template="System: {system_message}",
|
1202 |
+
messages=[],
|
1203 |
+
sep_style=SeparatorStyle.FALCON_CHAT,
|
1204 |
+
sep="\n",
|
1205 |
+
sep2="<|endoftext|>",
|
1206 |
+
stop_str="\nUser:", # use stop_str to stop generation after stop_token_ids, it will also remove stop_str from the generated text
|
1207 |
+
)
|
1208 |
+
)
|
1209 |
+
|
1210 |
+
# Phind template
|
1211 |
+
# source: https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
|
1212 |
+
register_conv_template(
|
1213 |
+
Conversation(
|
1214 |
+
name="phind",
|
1215 |
+
system_message="### System Prompt\nYou are an intelligent programming assistant.",
|
1216 |
+
roles=("### User Message", "### Assistant"),
|
1217 |
+
messages=(),
|
1218 |
+
offset=0,
|
1219 |
+
sep_style=SeparatorStyle.ADD_COLON_SINGLE,
|
1220 |
+
sep="\n\n",
|
1221 |
+
)
|
1222 |
+
)
|
1223 |
+
|
1224 |
+
# Metharme formatting for Pygmalion models
|
1225 |
+
# source: https://huggingface.co/PygmalionAI/pygmalion-2-13b
|
1226 |
+
register_conv_template(
|
1227 |
+
Conversation(
|
1228 |
+
name="metharme",
|
1229 |
+
system_template="<|system|>{system_message}",
|
1230 |
+
system_message="""Enter RP mode. You shall reply to the user while staying
|
1231 |
+
in character. Your responses must be detailed, creative, immersive, and drive the scenario
|
1232 |
+
forward.""",
|
1233 |
+
roles=("<|user|>", "<|model|>"),
|
1234 |
+
sep_style=SeparatorStyle.NO_COLON_SINGLE,
|
1235 |
+
sep="",
|
1236 |
+
stop_str="<|user|>",
|
1237 |
+
)
|
1238 |
+
)
|
1239 |
+
|
1240 |
+
# Zephyr template
|
1241 |
+
# reference: https://huggingface.co/spaces/HuggingFaceH4/zephyr-playground/blob/main/dialogues.py
|
1242 |
+
register_conv_template(
|
1243 |
+
Conversation(
|
1244 |
+
name="zephyr",
|
1245 |
+
system_template="<|system|>\n{system_message}",
|
1246 |
+
roles=("<|user|>", "<|assistant|>"),
|
1247 |
+
sep_style=SeparatorStyle.CHATML,
|
1248 |
+
sep="</s>",
|
1249 |
+
stop_token_ids=[2],
|
1250 |
+
stop_str="</s>",
|
1251 |
+
)
|
1252 |
+
)
|
1253 |
+
|
1254 |
+
# Orca-2 template
|
1255 |
+
# reference: https://huggingface.co/microsoft/Orca-2-7b
|
1256 |
+
register_conv_template(
|
1257 |
+
Conversation(
|
1258 |
+
name="orca-2",
|
1259 |
+
system_template="<|im_start|>system\n{system_message}",
|
1260 |
+
system_message="You are Orca, an AI language model created by Microsoft. You are a cautious assistant. You carefully follow instructions. You are helpful and harmless and you follow ethical guidelines and promote positive behavior.",
|
1261 |
+
roles=("<|im_start|>user", "<|im_start|>assistant"),
|
1262 |
+
sep_style=SeparatorStyle.CHATML,
|
1263 |
+
sep="<|im_end|>",
|
1264 |
+
stop_str="<|im_end|>",
|
1265 |
+
)
|
1266 |
+
)
|
1267 |
+
|
1268 |
+
if __name__ == "__main__":
|
1269 |
+
from fastchat.conversation import get_conv_template
|
1270 |
+
|
1271 |
+
print("-- Vicuna template --")
|
1272 |
+
conv = get_conv_template("vicuna_v1.1")
|
1273 |
+
conv.append_message(conv.roles[0], "Hello!")
|
1274 |
+
conv.append_message(conv.roles[1], "Hi!")
|
1275 |
+
conv.append_message(conv.roles[0], "How are you?")
|
1276 |
+
conv.append_message(conv.roles[1], None)
|
1277 |
+
print(conv.get_prompt())
|
1278 |
+
|
1279 |
+
print("\n")
|
1280 |
+
|
1281 |
+
print("-- Llama-2 template --")
|
1282 |
+
conv = get_conv_template("llama-2")
|
1283 |
+
conv.set_system_message("You are a helpful, respectful and honest assistant.")
|
1284 |
+
conv.append_message(conv.roles[0], "Hello!")
|
1285 |
+
conv.append_message(conv.roles[1], "Hi!")
|
1286 |
+
conv.append_message(conv.roles[0], "How are you?")
|
1287 |
+
conv.append_message(conv.roles[1], None)
|
1288 |
+
print(conv.get_prompt())
|
1289 |
+
|
1290 |
+
print("\n")
|
1291 |
+
|
1292 |
+
print("-- ChatGPT template --")
|
1293 |
+
conv = get_conv_template("chatgpt")
|
1294 |
+
conv.append_message(conv.roles[0], "Hello!")
|
1295 |
+
conv.append_message(conv.roles[1], "Hi!")
|
1296 |
+
conv.append_message(conv.roles[0], "How are you?")
|
1297 |
+
conv.append_message(conv.roles[1], None)
|
1298 |
+
print(conv.to_openai_api_messages())
|
1299 |
+
|
1300 |
+
print("\n")
|
1301 |
+
|
1302 |
+
print("-- Claude template --")
|
1303 |
+
conv = get_conv_template("claude")
|
1304 |
+
conv.append_message(conv.roles[0], "Hello!")
|
1305 |
+
conv.append_message(conv.roles[1], "Hi!")
|
1306 |
+
conv.append_message(conv.roles[0], "How are you?")
|
1307 |
+
conv.append_message(conv.roles[1], None)
|
1308 |
+
print(conv.get_prompt())
|
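Every entry in this file repeats the same pattern: construct a `Conversation` and hand it to `register_conv_template`. A minimal sketch of registering your own template, assuming the `fschat` package from requirements-dev.txt is installed; the name `my-custom-chat` is hypothetical and not part of this repo:

```python
# Minimal sketch: register a hypothetical template and render a prompt from it,
# following the same pattern as the entries above. Assumes `pip install fschat`.
from fastchat.conversation import (
    Conversation,
    SeparatorStyle,
    get_conv_template,
    register_conv_template,
)

register_conv_template(
    Conversation(
        name="my-custom-chat",  # hypothetical name, for illustration only
        system_template="### System:\n{system_message}",
        roles=("### User", "### Assistant"),
        sep_style=SeparatorStyle.ADD_COLON_SINGLE,
        sep="\n",
    )
)

conv = get_conv_template("my-custom-chat")
conv.set_system_message("You are a helpful assistant.")
conv.append_message(conv.roles[0], "Hello!")
conv.append_message(conv.roles[1], None)  # None marks the slot the model will fill
print(conv.get_prompt())
# ### System:
# You are a helpful assistant.
# ### User: Hello!
# ### Assistant:
```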
img/Interface.png
ADDED
requirements-dev.txt
ADDED
fschat
requirements.txt
ADDED
transformers
streamlit
sentencepiece
templates/airoboros_v1.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{% if not loop.first %}
{% endif %}
{% if message['role'] == 'system' %}
{{ message['content'] + ' ' }}
{% elif message['role'] == 'user' %}
{{ 'USER: ' + message['content'] + ' ' }}
{% elif message['role'] == 'assistant' %}
{{ 'ASSISTANT: ' + message['content'] + '</s>' }}
{% endif %}
{% endfor %}
{% if add_generation_prompt %}
{{ 'ASSISTANT:' }}
{% endif %}
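Each predefined template opens with a `{% if not add_generation_prompt is defined %}` guard so it still renders when the caller never passes that flag. A quick sketch rendering the file above directly with jinja2 (already a transformers dependency) to see both behaviors; the whitespace-stripping mirrors `tests_template/utils.sanitize_jinja2`:

```python
# Sketch: render templates/airoboros_v1.jinja2 with and without the
# generation prompt; add_generation_prompt defaults to false via the guard.
from jinja2 import Template

with open("templates/airoboros_v1.jinja2") as f:
    # collapse to one line, same idea as sanitize_jinja2 in tests_template/utils.py
    source = "".join(line.lstrip(" ").rstrip("\n") for line in f)

chat = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello, how are you?"},
]
print(Template(source).render(messages=chat))
# -> "You are a helpful assistant. USER: Hello, how are you? "
print(Template(source).render(messages=chat, add_generation_prompt=True))
# -> "You are a helpful assistant. USER: Hello, how are you? ASSISTANT:"
```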
templates/airoboros_v2.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{% if not loop.first %}
{% endif %}
{% if message['role'] == 'system' %}
{{ message['content'] + '\n' }}
{% elif message['role'] == 'user' %}
{{ 'USER: ' + message['content'] + '\n' }}
{% elif message['role'] == 'assistant' %}
{{ 'ASSISTANT: ' + message['content'] + '</s>' }}
{% endif %}
{% endfor %}
{% if add_generation_prompt %}
{{ 'ASSISTANT:' }}
{% endif %}
templates/chat-ml.jinja2
ADDED
{% for message in messages %}
{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
{% endfor %}
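Note that chat-ml.jinja2 only serializes the existing turns and never appends a generation prompt, unlike the other ChatML-style templates in this folder. A hedged sketch of what it produces when rendered directly:

```python
# Sketch: preview the raw ChatML serialization this template emits.
from jinja2 import Template

with open("templates/chat-ml.jinja2") as f:
    source = "".join(line.lstrip(" ").rstrip("\n") for line in f)

chat = [{"role": "user", "content": "Hello!"}]
print(Template(source).render(messages=chat))
# <|im_start|>user
# Hello!<|im_end|>
```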
templates/falcon-chat.jinja2
ADDED
{% for message in messages %}
{% if not loop.first %}
{{ '\n' }}
{% endif %}
{% if message['role'] == 'system' %}
{{ 'System: ' }}
{% elif message['role'] == 'user' %}
{{ 'User: ' }}
{% elif message['role'] == 'assistant' %}
{{ 'Falcon: ' }}
{% endif %}
{{ message['content'] }}
{% endfor %}
{% if add_generation_prompt %}
{{ '\n' + 'Falcon:' }}
{% endif %}
templates/llama-2.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{% if message['role'] == 'user' %}
{{ '<s>' + '[INST] ' + message['content'] + ' [/INST]' }}
{% elif message['role'] == 'system' %}
{{ '<<SYS>>\n' + message['content'] + '\n<</SYS>>\n\n' }}
{% elif message['role'] == 'assistant' %}
{{ ' ' + message['content'] + ' ' + '</s>' }}
{% endif %}
{% endfor %}
templates/mistral-7b-openorca.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
{% endfor %}
{% if add_generation_prompt %}
{{ '<|im_start|>assistant\n' }}
{% endif %}
templates/open-orca.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{% if message['role'] == 'user' %}
{{ 'User: ' + message['content'] + '<|end_of_turn|>' + '\n' }}
{% elif message['role'] == 'system' %}
{{ message['content'] + '<|end_of_turn|>' + '\n' }}
{% elif message['role'] == 'assistant' %}
{{ 'Assistant: ' + message['content'] + '<|end_of_turn|>' + '\n' }}
{% endif %}
{% endfor %}
{% if add_generation_prompt %}
{{ 'Assistant: ' }}
{% endif %}
templates/openhermes-2.5-mistral.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
{% endfor %}
{% if add_generation_prompt %}
{{ '<|im_start|>assistant\n' }}
{% endif %}
templates/orca2.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
{% endfor %}
{% if add_generation_prompt %}
{{ '<|im_start|>assistant\n' }}
{% endif %}
templates/vicuna_v1.1.jinja2
ADDED
{% if not add_generation_prompt is defined %}
{% set add_generation_prompt = false %}
{% endif %}
{% for message in messages %}
{% if not loop.first %}
{% endif %}
{% if message['role'] == 'system' %}
{{ message['content'] + ' ' }}
{% elif message['role'] == 'user' %}
{{ 'USER: ' + message['content'] + ' ' }}
{% elif message['role'] == 'assistant' %}
{{ 'ASSISTANT: ' + message['content'] + '</s>' }}
{% endif %}
{% endfor %}
{% if add_generation_prompt %}
{{ 'ASSISTANT:' }}
{% endif %}
templates/zephyr.jinja2
ADDED
{% for message in messages %}
{% if message['role'] == 'user' %}
{{ '<|user|>\n' + message['content'] + eos_token }}
{% elif message['role'] == 'system' %}
{{ '<|system|>\n' + message['content'] + eos_token }}
{% elif message['role'] == 'assistant' %}
{{ '<|assistant|>\n' + message['content'] + eos_token }}
{% endif %}
{% if loop.last and add_generation_prompt %}
{{ '<|assistant|>' }}
{% endif %}
{% endfor %}
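zephyr.jinja2 is the only template in this folder that references the `eos_token` variable. `tokenizer.apply_chat_template` injects the tokenizer's special tokens into the template context, so it works there out of the box; when rendering the file with raw jinja2 you have to supply `eos_token` yourself. A hedged sketch:

```python
# Sketch: zephyr.jinja2 needs eos_token passed explicitly under raw jinja2;
# transformers supplies it automatically via apply_chat_template.
from jinja2 import Template

with open("templates/zephyr.jinja2") as f:
    source = "".join(line.lstrip(" ").rstrip("\n") for line in f)

chat = [{"role": "user", "content": "Hello!"}]
print(Template(source).render(messages=chat, eos_token="</s>", add_generation_prompt=True))
# <|user|>
# Hello!</s><|assistant|>
```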
tests_template/__pycache__/test_llama2.cpython-310-pytest-7.4.3.pyc
ADDED
Binary file (2.13 kB)
tests_template/__pycache__/test_orca2 copy.cpython-310-pytest-7.4.3.pyc
ADDED
Binary file (2.31 kB)
tests_template/__pycache__/test_orca2.cpython-310-pytest-7.4.3.pyc
ADDED
Binary file (2.12 kB)
tests_template/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (375 Bytes)
tests_template/test_airoboros_v1.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2
import difflib


def test_airoboros_v1_template():
    jinja_lines = []
    with open("../templates/airoboros_v1.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="jondurbin/airoboros-l2-7b-gpt4-2.0", trust_remote_code=True)
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("default template")
    print(transformer_prompt)

    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print()
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print()
    print("add_generation_prompt True:")
    print(transformer_prompt)

    print("Fastchat template: ")
    conv = get_conv_template("airoboros_v1")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
    matcher = difflib.SequenceMatcher(a=transformer_prompt, b=conv.get_prompt())
    print("Matching Sequences:")
    for match in matcher.get_matching_blocks():
        print("Match : {}".format(match))
        print("Matching Sequence : {}".format(transformer_prompt[match.a:match.a + match.size]))
    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_airoboros_v1_template()
tests_template/test_airoboros_v2.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2
import difflib


def test_airoboros_v2_template():
    jinja_lines = []
    with open("../templates/airoboros_v2.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="jondurbin/airoboros-l2-7b-2.2", trust_remote_code=True)
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("default template")
    print(transformer_prompt)

    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print()
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print()
    print("add_generation_prompt True:")
    print(transformer_prompt)

    print("Fastchat template: ")
    conv = get_conv_template("airoboros_v2")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
    matcher = difflib.SequenceMatcher(a=transformer_prompt, b=conv.get_prompt())
    print("Matching Sequences:")
    for match in matcher.get_matching_blocks():
        print("Match : {}".format(match))
        print("Matching Sequence : {}".format(transformer_prompt[match.a:match.a + match.size]))
    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_airoboros_v2_template()
tests_template/test_llama2.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2


def test_llama2_template():
    jinja_lines = []
    with open("../templates/llama-2.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="microsoft/Orca-2-7b", trust_remote_code=True)
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("default template")
    print(transformer_prompt)

    tokenizer.bos_token = "<s>"
    tokenizer.eos_token = "</s>"
    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print()
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print()
    print("add_generation_prompt True:")
    print(transformer_prompt)

    print("Fastchat template: ")
    conv = get_conv_template("llama-2")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())

    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_llama2_template()
tests_template/test_mistral-7b-openorca.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2
import difflib


def test_mistral_7b_openorca_template():
    jinja_lines = []
    with open("../templates/mistral-7b-openorca.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="Open-Orca/Mistral-7B-OpenOrca", trust_remote_code=True)
    # expected shape: f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("default template")
    print(transformer_prompt)

    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print()
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print()
    print("add_generation_prompt True:")
    print(transformer_prompt)

    print("Fastchat template: ")
    conv = get_conv_template("mistral-7b-openorca")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
    matcher = difflib.SequenceMatcher(a=transformer_prompt, b=conv.get_prompt())
    print("Matching Sequences:")
    for match in matcher.get_matching_blocks():
        print("Match : {}".format(match))
        print("Matching Sequence : {}".format(transformer_prompt[match.a:match.a + match.size]))
    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_mistral_7b_openorca_template()
tests_template/test_openhermes-2.5-mistral.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2
import difflib


def test_openhermes_2_5_mistral_template():
    jinja_lines = []
    with open("../templates/openhermes-2.5-mistral.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="teknium/OpenHermes-2.5-Mistral-7B", trust_remote_code=True)
    # expected shape: f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("default template")
    print(transformer_prompt)

    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print()
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print()
    print("add_generation_prompt True:")
    print(transformer_prompt)

    print("Fastchat template: ")
    conv = get_conv_template("OpenHermes-2.5-Mistral-7B")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
    matcher = difflib.SequenceMatcher(a=transformer_prompt, b=conv.get_prompt())
    print("Matching Sequences:")
    for match in matcher.get_matching_blocks():
        print("Match : {}".format(match))
        print("Matching Sequence : {}".format(transformer_prompt[match.a:match.a + match.size]))
    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_openhermes_2_5_mistral_template()
tests_template/test_openorca.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2


def test_openorca_template():
    jinja_lines = []
    with open("../templates/open-orca.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="Open-Orca/OpenOrcaxOpenChat-Preview2-13B", trust_remote_code=True)
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("default template")
    print(transformer_prompt)

    tokenizer.eos_token = "<|end_of_turn|>"
    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print()
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print()
    print("add_generation_prompt True:")
    print(transformer_prompt)

    print("Fastchat template: ")
    conv = get_conv_template("open-orca")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())

    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_openorca_template()
tests_template/test_orca2.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2


def test_orca2_template():
    jinja_lines = []
    with open("../templates/orca2.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="microsoft/Orca-2-7b", trust_remote_code=True)
    # expected shape: f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant"
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)

    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print("add_generation_prompt True:")
    print(transformer_prompt)

    conv = get_conv_template("orca-2")
    print("Fastchat template: ")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())

    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_orca2_template()
tests_template/test_vicuna_v1.1.py
ADDED
from transformers import AutoTokenizer
from fastchat.conversation import get_conv_template
from utils import sanitize_jinja2
import difflib


def test_vicuna_v1_1_template():
    jinja_lines = []
    with open("../templates/vicuna_v1.1.jinja2", "r") as f:
        jinja_lines = f.readlines()

    print("jinja_lines: ", jinja_lines)

    print("sanitized: ", sanitize_jinja2(jinja_lines))

    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
        {"role": "user", "content": "I'd like to show off how chat templating works!"},
    ]

    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="lmsys/vicuna-13b-v1.1", trust_remote_code=True)
    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print("default template")
    print(transformer_prompt)

    tokenizer.chat_template = sanitize_jinja2(jinja_lines)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False)
    print()
    print("add_generation_prompt False:")
    print(transformer_prompt)

    transformer_prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    print()
    print("add_generation_prompt True:")
    print(transformer_prompt)

    print("Fastchat template: ")
    conv = get_conv_template("vicuna_v1.1")

    conv.set_system_message(chat[0]["content"])
    conv.append_message(conv.roles[0], chat[1]["content"])
    conv.append_message(conv.roles[1], chat[2]["content"])
    conv.append_message(conv.roles[0], chat[3]["content"])
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
    matcher = difflib.SequenceMatcher(a=transformer_prompt, b=conv.get_prompt())
    print("Matching Sequences:")
    for match in matcher.get_matching_blocks():
        print("Match : {}".format(match))
        print("Matching Sequence : {}".format(transformer_prompt[match.a:match.a + match.size]))
    assert transformer_prompt == conv.get_prompt()


if __name__ == "__main__":
    test_vicuna_v1_1_template()
tests_template/utils.py
ADDED
def sanitize_jinja2(jinja_lines):
    """Collapse a multi-line Jinja2 template into a single line.

    Leading spaces and trailing newlines are stripped from each line so the
    template file's indentation does not leak into the rendered prompt.
    """
    one_liner_jinja = ""
    for line in jinja_lines:
        one_liner_jinja += line.lstrip(" ").rstrip("\n")

    return one_liner_jinja
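Every test above follows the same recipe: read a template file, collapse it with `sanitize_jinja2`, assign the result to `tokenizer.chat_template`, then compare the rendered prompt against the corresponding fastchat template. A condensed sketch of that recipe, reusing a template and model the tests already exercise:

```python
# Condensed version of the recipe used by the tests above (run from tests_template/).
from transformers import AutoTokenizer
from utils import sanitize_jinja2

with open("../templates/orca2.jinja2", "r") as f:
    jinja_lines = f.readlines()

tokenizer = AutoTokenizer.from_pretrained("microsoft/Orca-2-7b", trust_remote_code=True)
tokenizer.chat_template = sanitize_jinja2(jinja_lines)

chat = [{"role": "user", "content": "Hello, how are you?"}]
print(tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True))
# <|im_start|>user
# Hello, how are you?<|im_end|>
# <|im_start|>assistant
```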