Upload folder using huggingface_hub (#2)
README.md CHANGED
@@ -8,10 +8,14 @@ tags:
 - mlx
 license_link: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/LICENSE
 pipeline_tag: text-generation
+widget:
+- messages:
+  - role: user
+    content: Can you provide ways to eat combinations of bananas and dragonfruits?
 ---
 
 # mlx-community/Phi-3-mini-4k-instruct-4bit-no-q-embed
-This model was converted to MLX format from [`microsoft/Phi-3-mini-4k-instruct`]() using mlx-lm version **0.10.0**.
+This model was converted to MLX format from [`microsoft/Phi-3-mini-4k-instruct`]() using mlx-lm version **0.12.0**.
 Refer to the [original model card](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) for more details on the model.
 ## Use with mlx
 
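The diff truncates the body of the `## Use with mlx` section. For context, a minimal sketch of the usual mlx-community usage snippet, assuming the standard mlx-lm 0.12 API (`load` and `generate` from `mlx_lm`); the repo's exact README text is not shown in this diff:

```python
# Sketch of typical usage for an mlx-community 4-bit conversion; the diff
# above cuts off the "Use with mlx" body, so treat this as an assumption,
# not the repo's verbatim snippet.
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/Phi-3-mini-4k-instruct-4bit-no-q-embed")
response = generate(model, tokenizer, prompt="hello", verbose=True)
```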
added_tokens.json CHANGED
@@ -1,40 +1,13 @@
 {
-  "<|/code|>": 32014,
-  "<|/data|>": 32033,
-  "<|/inst|>": 32037,
-  "<|/query|>": 32031,
-  "<|/sys|>": 32035,
-  "<|assistant_mask|>": 32017,
   "<|assistant|>": 32001,
-  "<|calc|>": 32012,
-  "<|code|>": 32013,
-  "<|continue|>": 32009,
-  "<|data|>": 32032,
-  "<|diff_marker|>": 32025,
-  "<|disc_sep|>": 32029,
-  "<|disc_start|>": 32028,
-  "<|disc_thread|><|query|>": 32030,
   "<|endoftext|>": 32000,
   "<|end|>": 32007,
-  "<|fim_middle|>": 32021,
-  "<|fim_prefix|>": 32020,
-  "<|fim_suffix|>": 32022,
-  "<|function_call|>": 32005,
-  "<|function_list|>": 32011,
-  "<|function_output|>": 32003,
-  "<|ghissue|>": 32026,
-  "<|ghreview|>": 32027,
-  "<|inst|>": 32036,
-  "<|ipynb_marker|>": 32024,
-  "<|message|>": 32019,
-  "<|meta_start|>": 32023,
-  "<|raw|>": 32008,
-  "<|resource|>": 32016,
-  "<|start|>": 32018,
-  "<|step|>": 32002,
-  "<|summary|>": 32015,
+  "<|placeholder1|>": 32002,
+  "<|placeholder2|>": 32003,
+  "<|placeholder3|>": 32004,
+  "<|placeholder4|>": 32005,
+  "<|placeholder5|>": 32008,
+  "<|placeholder6|>": 32009,
   "<|system|>": 32006,
-  "<|sys|>": 32034,
-  "<|tag|>": 32004,
   "<|user|>": 32010
 }
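The specialized control tokens are dropped and the surviving reserved ids are renamed to neutral placeholders. A quick sanity check of the new mapping (a sketch; assumes `transformers` is installed and the updated repo is reachable):

```python
# Sketch: confirm the placeholder renames land on the expected ids.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mlx-community/Phi-3-mini-4k-instruct-4bit-no-q-embed")
added = tok.get_added_vocab()  # maps added-token strings to ids
assert added["<|placeholder1|>"] == 32002  # formerly <|step|>
assert added["<|placeholder5|>"] == 32008  # formerly <|raw|>
```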
config.json CHANGED
@@ -29,7 +29,7 @@
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 10000.0,
-  "sliding_window": 2048,
+  "sliding_window": 2047,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.39.3",
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e908f149cf6056b2f8fee5e1443cdae521be06558907eb952fbd5f383ad533b8
+oid sha256:5a877c43b4aa0ea54f197c9393484d950e30af4b88da49cba78fa9ba93aeccaa
 size 2291290600
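Only the LFS pointer's sha256 changes; the payload size (2291290600 bytes) is identical. To check a downloaded file against the new pointer (a sketch):

```python
# Sketch: hash a downloaded model.safetensors and compare against the
# sha256 oid in the updated LFS pointer above.
import hashlib

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() == "5a877c43b4aa0ea54f197c9393484d950e30af4b88da49cba78fa9ba93aeccaa")
```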
sample_finetune.py CHANGED
@@ -1,28 +1,68 @@
-import torch
+import sys
+import logging
+
+import datasets
 from datasets import load_dataset
+from peft import LoraConfig
+import torch
+import transformers
 from trl import SFTTrainer
-from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
+from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
 
 """
 A simple example on using SFTTrainer and Accelerate to finetune Phi-3 models. For
-a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py
-
-1. Install accelerate:
+a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
+This example has utilized DeepSpeed ZeRO3 offload to reduce the memory usage. The
+script can be run on V100 or later generation GPUs. Here are some suggestions on
+further reducing memory consumption:
+- reduce batch size
+- decrease lora dimension
+- restrict lora target modules
+Please follow these steps to run the script:
+1. Install dependencies:
     conda install -c conda-forge accelerate
-2. Setup accelerate config:
+    pip3 install -i https://pypi.org/simple/ bitsandbytes
+    pip3 install peft transformers trl datasets
+    pip3 install deepspeed
+2. Setup accelerate and deepspeed config based on the machine used:
     accelerate config
-to simply use all the GPUs available:
-    python -c "from accelerate.utils import write_basic_config; write_basic_config(mixed_precision='bf16')"
-check accelerate config:
+Here is a sample config for deepspeed zero3:
+    compute_environment: LOCAL_MACHINE
+    debug: false
+    deepspeed_config:
+      gradient_accumulation_steps: 1
+      offload_optimizer_device: none
+      offload_param_device: none
+      zero3_init_flag: true
+      zero3_save_16bit_model: true
+      zero_stage: 3
+    distributed_type: DEEPSPEED
+    downcast_bf16: 'no'
+    enable_cpu_affinity: false
+    machine_rank: 0
+    main_training_function: main
+    mixed_precision: bf16
+    num_machines: 1
+    num_processes: 4
+    rdzv_backend: static
+    same_network: true
+    tpu_env: []
+    tpu_use_cluster: false
+    tpu_use_sudo: false
+    use_cpu: false
+3. check accelerate config:
    accelerate env
-3. Run the code:
+4. Run the code:
    accelerate launch sample_finetune.py
 """
 
+logger = logging.getLogger(__name__)
+
+
 ###################
 # Hyper-parameters
 ###################
-args = {
+training_config = {
     "bf16": True,
     "do_eval": False,
     "learning_rate": 5.0e-06,
@@ -35,7 +75,7 @@ args = {
     "output_dir": "./checkpoint_dir",
     "overwrite_output_dir": True,
     "per_device_eval_batch_size": 4,
-    "per_device_train_batch_size": 8,
+    "per_device_train_batch_size": 4,
     "remove_unused_columns": True,
     "save_steps": 100,
     "save_total_limit": 1,
@@ -45,8 +85,43 @@ args = {
     "gradient_accumulation_steps": 1,
     "warmup_ratio": 0.2,
 }
-
-training_args = TrainingArguments(**args)
+
+peft_config = {
+    "r": 16,
+    "lora_alpha": 32,
+    "lora_dropout": 0.05,
+    "bias": "none",
+    "task_type": "CAUSAL_LM",
+    "target_modules": "all-linear",
+    "modules_to_save": None,
+}
+train_conf = TrainingArguments(**training_config)
+peft_conf = LoraConfig(**peft_config)
+
+
+###############
+# Setup logging
+###############
+logging.basicConfig(
+    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+    datefmt="%Y-%m-%d %H:%M:%S",
+    handlers=[logging.StreamHandler(sys.stdout)],
+)
+log_level = train_conf.get_process_log_level()
+logger.setLevel(log_level)
+datasets.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.set_verbosity(log_level)
+transformers.utils.logging.enable_default_handler()
+transformers.utils.logging.enable_explicit_format()
+
+# Log on each process a small summary
+logger.warning(
+    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
+    + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
+)
+logger.info(f"Training/evaluation parameters {train_conf}")
+logger.info(f"PEFT parameters {peft_conf}")
+
 
 ################
 # Model Loading
@@ -58,14 +133,16 @@ model_kwargs = dict(
     trust_remote_code=True,
     attn_implementation="flash_attention_2",  # load the model with flash-attention support
     torch_dtype=torch.bfloat16,
-    device_map="cuda",
+    device_map=None
 )
 model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
 tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
+tokenizer.model_max_length = 2048
 tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
 tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
 tokenizer.padding_side = 'right'
 
+
 ##################
 # Data Processing
 ##################
@@ -82,26 +159,36 @@ def apply_chat_template(
     return example
 
 raw_dataset = load_dataset("HuggingFaceH4/ultrachat_200k")
-column_names = list(raw_dataset["train_sft"].features)
+train_dataset = raw_dataset["train_sft"]
+test_dataset = raw_dataset["test_sft"]
+column_names = list(train_dataset.features)
 
-processed_dataset = raw_dataset.map(
+processed_train_dataset = train_dataset.map(
     apply_chat_template,
     fn_kwargs={"tokenizer": tokenizer},
-    num_proc=12,
+    num_proc=10,
     remove_columns=column_names,
-    desc="Applying chat template",
+    desc="Applying chat template to train_sft",
 )
-train_dataset = processed_dataset["train_sft"]
-eval_dataset = processed_dataset["test_sft"]
+
+processed_test_dataset = test_dataset.map(
+    apply_chat_template,
+    fn_kwargs={"tokenizer": tokenizer},
+    num_proc=10,
+    remove_columns=column_names,
+    desc="Applying chat template to test_sft",
+)
+
 
 ###########
 # Training
 ###########
 trainer = SFTTrainer(
     model=model,
-    args=training_args,
-    train_dataset=train_dataset,
-    eval_dataset=eval_dataset,
+    args=train_conf,
+    peft_config=peft_conf,
+    train_dataset=processed_train_dataset,
+    eval_dataset=processed_test_dataset,
     max_seq_length=2048,
     dataset_text_field="text",
     tokenizer=tokenizer,
@@ -113,16 +200,18 @@ trainer.log_metrics("train", metrics)
 trainer.save_metrics("train", metrics)
 trainer.save_state()
 
+
 #############
 # Evaluation
 #############
 tokenizer.padding_side = 'left'
 metrics = trainer.evaluate()
-metrics["eval_samples"] = len(eval_dataset)
+metrics["eval_samples"] = len(processed_test_dataset)
 trainer.log_metrics("eval", metrics)
 trainer.save_metrics("eval", metrics)
 
-############
-# Save model
-############
-trainer.save_model(training_args.output_dir)
+
+# ############
+# # Save model
+# ############
+trainer.save_model(train_conf.output_dir)
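The updated docstring suggests further memory savings by decreasing the LoRA dimension and restricting the LoRA target modules. A sketch of what that could look like relative to the `peft_config` above; the module names are assumptions based on Phi-3's fused projections, not something this diff specifies:

```python
# Sketch of a lower-memory LoRA config per the docstring's suggestions.
# "qkv_proj"/"o_proj" are assumed Phi-3 module names; verify against
# model.named_modules() before relying on them.
from peft import LoraConfig

low_mem_peft_conf = LoraConfig(
    r=8,                    # decreased LoRA dimension (was 16)
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["qkv_proj", "o_proj"],  # restricted from "all-linear"
)
```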
special_tokens_map.json CHANGED
@@ -1,7 +1,4 @@
 {
-  "additional_special_tokens": [
-    "<|/inst|>"
-  ],
   "bos_token": {
     "content": "<s>",
     "lstrip": false,
tokenizer.json CHANGED
@@ -26,9 +26,9 @@
       "content": "</s>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
-      "special": false
+      "special": true
     },
     {
       "id": 32000,
@@ -44,43 +44,43 @@
       "content": "<|assistant|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
     {
       "id": 32002,
-      "content": "<|step|>",
+      "content": "<|placeholder1|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
     {
       "id": 32003,
-      "content": "<|function_output|>",
+      "content": "<|placeholder2|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
     {
       "id": 32004,
-      "content": "<|tag|>",
+      "content": "<|placeholder3|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
     {
       "id": 32005,
-      "content": "<|function_call|>",
+      "content": "<|placeholder4|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
@@ -89,7 +89,7 @@
       "content": "<|system|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
@@ -98,25 +98,25 @@
       "content": "<|end|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
     {
       "id": 32008,
-      "content": "<|raw|>",
+      "content": "<|placeholder5|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
     {
       "id": 32009,
-      "content": "<|continue|>",
+      "content": "<|placeholder6|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     },
@@ -125,250 +125,7 @@
       "content": "<|user|>",
       "single_word": false,
       "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32011,
-      "content": "<|function_list|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32012,
-      "content": "<|calc|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32013,
-      "content": "<|code|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32014,
-      "content": "<|/code|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32015,
-      "content": "<|summary|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32016,
-      "content": "<|resource|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32017,
-      "content": "<|assistant_mask|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32018,
-      "content": "<|start|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32019,
-      "content": "<|message|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32020,
-      "content": "<|fim_prefix|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32021,
-      "content": "<|fim_middle|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32022,
-      "content": "<|fim_suffix|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32023,
-      "content": "<|meta_start|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32024,
-      "content": "<|ipynb_marker|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32025,
-      "content": "<|diff_marker|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32026,
-      "content": "<|ghissue|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32027,
-      "content": "<|ghreview|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32028,
-      "content": "<|disc_start|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32029,
-      "content": "<|disc_sep|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32030,
-      "content": "<|disc_thread|><|query|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32031,
-      "content": "<|/query|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32032,
-      "content": "<|data|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32033,
-      "content": "<|/data|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32034,
-      "content": "<|sys|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32035,
-      "content": "<|/sys|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32036,
-      "content": "<|inst|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
-      "normalized": false,
-      "special": true
-    },
-    {
-      "id": 32037,
-      "content": "<|/inst|>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": true,
+      "rstrip": false,
       "normalized": false,
       "special": true
     }
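Besides the renames, every special token flips `rstrip` from true to false (and `</s>` becomes `special: true`). With `rstrip: true` the tokenizer swallowed whitespace immediately following these tokens; the updated files keep it. A sketch of the observable difference:

```python
# Sketch: with the updated files, the newline after <|user|> is preserved
# rather than stripped (rstrip is now false on the special tokens).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mlx-community/Phi-3-mini-4k-instruct-4bit-no-q-embed")
ids = tok.encode("<|user|>\nhello", add_special_tokens=False)
print(tok.convert_ids_to_tokens(ids))  # the "\n" now survives tokenization
```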
tokenizer_config.json CHANGED
@@ -22,9 +22,9 @@
       "content": "</s>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
-      "special": false
+      "special": true
     },
     "32000": {
       "content": "<|endoftext|>",
@@ -38,39 +38,39 @@
       "content": "<|assistant|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
     "32002": {
-      "content": "<|step|>",
+      "content": "<|placeholder1|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
     "32003": {
-      "content": "<|function_output|>",
+      "content": "<|placeholder2|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
     "32004": {
-      "content": "<|tag|>",
+      "content": "<|placeholder3|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
     "32005": {
-      "content": "<|function_call|>",
+      "content": "<|placeholder4|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
@@ -78,7 +78,7 @@
       "content": "<|system|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
@@ -86,23 +86,23 @@
       "content": "<|end|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
     "32008": {
-      "content": "<|raw|>",
+      "content": "<|placeholder5|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
     "32009": {
-      "content": "<|continue|>",
+      "content": "<|placeholder6|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     },
@@ -110,232 +110,13 @@
       "content": "<|user|>",
       "lstrip": false,
       "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32011": {
-      "content": "<|function_list|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32012": {
-      "content": "<|calc|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32013": {
-      "content": "<|code|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32014": {
-      "content": "<|/code|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32015": {
-      "content": "<|summary|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32016": {
-      "content": "<|resource|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32017": {
-      "content": "<|assistant_mask|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32018": {
-      "content": "<|start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32019": {
-      "content": "<|message|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32020": {
-      "content": "<|fim_prefix|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32021": {
-      "content": "<|fim_middle|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32022": {
-      "content": "<|fim_suffix|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32023": {
-      "content": "<|meta_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32024": {
-      "content": "<|ipynb_marker|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32025": {
-      "content": "<|diff_marker|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32026": {
-      "content": "<|ghissue|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32027": {
-      "content": "<|ghreview|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32028": {
-      "content": "<|disc_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32029": {
-      "content": "<|disc_sep|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32030": {
-      "content": "<|disc_thread|><|query|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32031": {
-      "content": "<|/query|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32032": {
-      "content": "<|data|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32033": {
-      "content": "<|/data|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32034": {
-      "content": "<|sys|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32035": {
-      "content": "<|/sys|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32036": {
-      "content": "<|inst|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32037": {
-      "content": "<|/inst|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
+      "rstrip": false,
       "single_word": false,
       "special": true
     }
   },
-  "additional_special_tokens": [
-    "<|/inst|>"
-  ],
   "bos_token": "<s>",
-  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|system|>' + '\n' + message['content'] + '<|end|>' + '\n'}}{% elif (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif message['role'] == 'assistant' %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
+  "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
   "legacy": false,