rcrajat committed
Commit 67c3664 (1 parent: cff52ad)

Upload folder using huggingface_hub

.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 8.0,
+     "original_max_position_embeddings": 4096,
+     "type": "yarn"
+   },
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.35.0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
.ipynb_checkpoints/generation_config-checkpoint.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.35.0"
+ }
.ipynb_checkpoints/zero_to_fp32-checkpoint.py ADDED
@@ -0,0 +1,578 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example: python zero_to_fp32.py . pytorch_model.bin
+
+ import argparse
+ import torch
+ import glob
+ import math
+ import os
+ import re
+ from collections import OrderedDict
+ from dataclasses import dataclass
+
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
+ # DeepSpeed data structures it has to be available in the current python environment.
+ from deepspeed.utils import logger
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
+                                             FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
+                                             FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
+
+
+ @dataclass
+ class zero_model_state:
+     buffers: dict
+     param_shapes: dict
+     shared_params: list
+     ds_version: int
+     frozen_param_shapes: dict
+     frozen_param_fragments: dict
+
+
+ debug = 0
+
+ # load to cpu
+ device = torch.device('cpu')
+
+
+ def atoi(text):
+     return int(text) if text.isdigit() else text
+
+
+ def natural_keys(text):
+     '''
+     alist.sort(key=natural_keys) sorts in human order
+     http://nedbatchelder.com/blog/200712/human_sorting.html
+     (See Toothy's implementation in the comments)
+     '''
+     return [atoi(c) for c in re.split(r'(\d+)', text)]
+
+
+ def get_model_state_file(checkpoint_dir, zero_stage):
+     if not os.path.isdir(checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
+
+     # there should be only one file
+     if zero_stage <= 2:
+         file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
+     elif zero_stage == 3:
+         file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
+
+     if not os.path.exists(file):
+         raise FileNotFoundError(f"can't find model states file at '{file}'")
+
+     return file
+
+
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
+     # XXX: need to test that this simple glob rule works for multi-node setup too
+     ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
+
+     if len(ckpt_files) == 0:
+         raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
+
+     return ckpt_files
+
+
+ def get_optim_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
+
+
+ def get_model_state_files(checkpoint_dir):
+     return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
+
+
+ def parse_model_states(files):
+     zero_model_states = []
+     for file in files:
+         state_dict = torch.load(file, map_location=device)
+
+         if BUFFER_NAMES not in state_dict:
+             raise ValueError(f"{file} is not a model state checkpoint")
+         buffer_names = state_dict[BUFFER_NAMES]
+         if debug:
+             print("Found buffers:", buffer_names)
+
+         # recover just the buffers while restoring them to fp32 if they were saved in fp16
+         buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
+         param_shapes = state_dict[PARAM_SHAPES]
+
+         # collect parameters that are included in param_shapes
+         param_names = []
+         for s in param_shapes:
+             for name in s.keys():
+                 param_names.append(name)
+
+         # update with frozen parameters
+         frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
+         if frozen_param_shapes is not None:
+             if debug:
+                 print(f"Found frozen_param_shapes: {frozen_param_shapes}")
+             param_names += list(frozen_param_shapes.keys())
+
+         # handle shared params
+         shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
+
+         ds_version = state_dict.get(DS_VERSION, None)
+
+         frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
+
+         z_model_state = zero_model_state(buffers=buffers,
+                                          param_shapes=param_shapes,
+                                          shared_params=shared_params,
+                                          ds_version=ds_version,
+                                          frozen_param_shapes=frozen_param_shapes,
+                                          frozen_param_fragments=frozen_param_fragments)
+         zero_model_states.append(z_model_state)
+
+     return zero_model_states
+
+
+ def parse_optim_states(files, ds_checkpoint_dir):
+
+     total_files = len(files)
+     state_dicts = []
+     for f in files:
+         state_dicts.append(torch.load(f, map_location=device))
+
+     if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
+         raise ValueError(f"{files[0]} is not a zero checkpoint")
+     zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
+     world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
+
+     # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
+     # parameters can be different from data parallelism for non-expert parameters. So we can just
+     # use the max of the partition_count to get the dp world_size.
+
+     if type(world_size) is list:
+         world_size = max(world_size)
+
+     if world_size != total_files:
+         raise ValueError(
+             f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
+             "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
+         )
+
+     # the groups are named differently in each stage
+     if zero_stage <= 2:
+         fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
+     elif zero_stage == 3:
+         fp32_groups_key = FP32_FLAT_GROUPS
+     else:
+         raise ValueError(f"unknown zero stage {zero_stage}")
+
+     if zero_stage <= 2:
+         fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
+     elif zero_stage == 3:
+         # if there is more than one param group, there will be multiple flattened tensors - one
+         # flattened tensor per group - for simplicity merge them into a single tensor
+         #
+         # XXX: could make the script more memory efficient for when there are multiple groups - it
+         # will require matching the sub-lists of param_shapes for each param group flattened tensor
+
+         fp32_flat_groups = [
+             torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
+         ]
+
+     return zero_stage, world_size, fp32_flat_groups
+
+
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
+     """
+     Returns fp32 state_dict reconstructed from ds checkpoint
+
+     Args:
+         - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
+
+     """
+     print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
+
+     optim_files = get_optim_files(ds_checkpoint_dir)
+     zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
+     print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
+
+     model_files = get_model_state_files(ds_checkpoint_dir)
+
+     zero_model_states = parse_model_states(model_files)
+     print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
+
+     if zero_stage <= 2:
+         return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+     elif zero_stage == 3:
+         return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
+
+
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     frozen_param_fragments = zero_model_states[0].frozen_param_fragments
+
+     if debug:
+         num_elem = sum(s.numel() for s in frozen_param_shapes.values())
+         print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         state_dict[name] = frozen_param_fragments[name]
+
+         if debug:
+             print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+
+     # Reconstruction protocol:
+     #
+     # XXX: document this
+
+     if debug:
+         for i in range(world_size):
+             for j in range(len(fp32_flat_groups[0])):
+                 print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
+
+     # XXX: memory usage doubles here (zero2)
+     num_param_groups = len(fp32_flat_groups[0])
+     merged_single_partition_of_fp32_groups = []
+     for i in range(num_param_groups):
+         merged_partitions = [sd[i] for sd in fp32_flat_groups]
+         full_single_fp32_vector = torch.cat(merged_partitions, 0)
+         merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
+     avail_numel = sum(
+         [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
+
+     if debug:
+         wanted_params = sum([len(shapes) for shapes in param_shapes])
+         wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
+         # not asserting if there is a mismatch due to possible padding
+         print(f"Have {avail_numel} numels to process.")
+         print(f"Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     total_numel = 0
+     total_params = 0
+     for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
+         offset = 0
+         avail_numel = full_single_fp32_vector.numel()
+         for name, shape in shapes.items():
+
+             unpartitioned_numel = shape.numel()
+             total_numel += unpartitioned_numel
+             total_params += 1
+
+             if debug:
+                 print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
+             state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
+             offset += unpartitioned_numel
+
+         # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
+         # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
+         # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
+         # live optimizer object, so we are checking that the numbers are within the right range
+         align_to = 2 * world_size
+
+         def zero2_align(x):
+             return align_to * math.ceil(x / align_to)
+
+         if debug:
+             print(f"original offset={offset}, avail_numel={avail_numel}")
+
+         offset = zero2_align(offset)
+         avail_numel = zero2_align(avail_numel)
+
+         if debug:
+             print(f"aligned offset={offset}, avail_numel={avail_numel}")
+
+         # Sanity check
+         if offset != avail_numel:
+             raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero2_merge_frozen_params(state_dict, zero_model_states)
+
+     _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
+     remainder = unpartitioned_numel % world_size
+     padding_numel = (world_size - remainder) if remainder else 0
+     partitioned_numel = math.ceil(unpartitioned_numel / world_size)
+     return partitioned_numel, padding_numel
+
+
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
+     if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
+         return
+
+     if debug:
+         for i in range(world_size):
+             num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
+             print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
+
+     frozen_param_shapes = zero_model_states[0].frozen_param_shapes
+     wanted_params = len(frozen_param_shapes)
+     wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
+     avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
+     print(f'Frozen params: Have {avail_numel} numels to process.')
+     print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
+
+     total_params = 0
+     total_numel = 0
+     for name, shape in zero_model_states[0].frozen_param_shapes.items():
+         total_params += 1
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+
+         param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
+         state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+     print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
+     param_shapes = zero_model_states[0].param_shapes
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
+     # param, re-consolidating each param, while dealing with padding if any
+
+     # merge list of dicts, preserving order
+     param_shapes = {k: v for d in param_shapes for k, v in d.items()}
+
+     if debug:
+         for i in range(world_size):
+             print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
+
+     wanted_params = len(param_shapes)
+     wanted_numel = sum(shape.numel() for shape in param_shapes.values())
+     # not asserting if there is a mismatch due to possible padding
+     avail_numel = fp32_flat_groups[0].numel() * world_size
+     print(f"Trainable params: Have {avail_numel} numels to process.")
+     print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
+
+     # params
+     # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
+     # out-of-core computing solution
+     offset = 0
+     total_numel = 0
+     total_params = 0
+     for name, shape in param_shapes.items():
+
+         unpartitioned_numel = shape.numel()
+         total_numel += unpartitioned_numel
+         total_params += 1
+
+         partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
+
+         if debug:
+             print(
+                 f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
+             )
+
+         # XXX: memory usage doubles here
+         state_dict[name] = torch.cat(
+             tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
+             0).narrow(0, 0, unpartitioned_numel).view(shape)
+         offset += partitioned_numel
+
+     offset *= world_size
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this is run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
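
Since this copy of the DeepSpeed helper ships with the checkpoint, a condensed usage sketch may help; it only restates what the docstrings above describe. The `checkpoint-500` path is a hypothetical placeholder, and the in-process variant assumes enough free CPU RAM to hold the full fp32 model.

```python
# Offline conversion (the script is copied into the checkpoint dir by DeepSpeed):
#
#   python zero_to_fp32.py /path/to/checkpoint-500 pytorch_model.bin
#
# In-process alternative, as documented in load_state_dict_from_zero_checkpoint above:
from transformers import AutoModelForCausalLM
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint

model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
model = load_state_dict_from_zero_checkpoint(model, "/path/to/checkpoint-500")
# `model` now holds consolidated fp32 weights and is detached from the DeepSpeed engine.
model.save_pretrained("consolidated-fp32")
```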
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[PAD]": 32000
+ }
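
added_tokens.json records a single extra token, [PAD] at id 32000, which is what raises vocab_size in this repo's config.json from Mistral's base 32000 to 32001. Below is a minimal sketch of how such a checkpoint typically comes about; this is an assumption about the preprocessing, not code taken from this repo.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")

# Append a dedicated padding token; it lands at the next free id, 32000.
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
model.resize_token_embeddings(len(tokenizer))  # embedding rows: 32000 -> 32001

assert tokenizer.convert_tokens_to_ids("[PAD]") == 32000
```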
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 8.0,
+     "original_max_position_embeddings": 4096,
+     "type": "yarn"
+   },
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.35.0",
+   "use_cache": false,
+   "vocab_size": 32001
+ }
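
The interesting part of this config is the YaRN rope_scaling block: the original 4096-token context is stretched by a factor of 8.0 to the advertised 32768-token max_position_embeddings. A quick sanity check, reading the file as plain JSON to avoid any dependency on custom model code:

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

scaling = cfg["rope_scaling"]
# 32768 == 8.0 * 4096
assert cfg["max_position_embeddings"] == int(scaling["factor"] * scaling["original_max_position_embeddings"])
# base Mistral vocab plus the added [PAD] token from added_tokens.json
assert cfg["vocab_size"] == 32000 + 1
```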
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.35.0"
+ }
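
This generation config pins only the special-token ids; no sampling defaults are stored, so decoding knobs such as temperature or top_p fall back to library defaults and are chosen per call. A small illustrative sketch, assuming the file sits in the current directory:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained(".")   # reads the generation_config.json above
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2
# Sampling parameters would be supplied at call time, e.g.:
# model.generate(**inputs, max_new_tokens=32, do_sample=True, top_p=0.9)
```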
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb4bf787a14c97d4a8dad27f231313f6b28af9f7f24ebe5a38e52627d3fa7371
+ size 4943170432
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3f4afbfbb4fbbc9a956513c32257cc883ca727dc6f0674fa4673e5db3e58095
+ size 4999819232
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a500aa07f4547e4886c7ec307c4ca508b881d1be6e2617677f2762d79152bead
+ size 4540524448
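
The three entries above are Git LFS pointer files, not the tensors themselves; Hub clients resolve them to the real shards on download. Their sizes sum to 14,483,514,112 bytes, slightly above the 14,483,480,576-byte total_size recorded in the index below because file sizes include the safetensors headers; at 2 bytes per float16 value that is roughly 7.24B parameters, consistent with Mistral-7B plus one extra vocab row. A small sketch for checking a downloaded shard against its pointer (the filename assumes the shard sits in the current directory):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file through SHA-256 without loading it all into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# Should match the oid of the first pointer (bb4bf787...) if the download is intact.
print(sha256_of("model-00001-of-00003.safetensors"))
```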
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14483480576
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
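
The index is plain JSON: `metadata.total_size` is the tensor byte count across shards, and `weight_map` names the shard holding each tensor. `from_pretrained` consumes it automatically, but it can also be used directly to pull a single tensor without loading the other shards, as in this sketch (paths assume the files sit in the current directory):

```python
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Look up which shard holds a tensor, then read just that tensor from it.
shard = index["weight_map"]["model.norm.weight"]  # "model-00003-of-00003.safetensors"
with safe_open(shard, framework="pt") as f:
    norm_weight = f.get_tensor("model.norm.weight")  # shape (4096,), i.e. hidden_size
```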
my_configuration_mistral.py ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Modification Copyright 2023 Dawei Zhu
3
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
6
+ # and OPT implementations in this library. It has been modified from its
7
+ # original forms to accommodate minor architectural differences compared
8
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+ """ Mistral model configuration"""
22
+
23
+ # from ...configuration_utils import PretrainedConfig
24
+ # from ...utils import logging
25
+ from transformers.configuration_utils import PretrainedConfig
26
+ from transformers.utils import logging
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+ # MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
31
+ # "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json",
32
+ # "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json",
33
+ # }
34
+ # MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
35
+
+ class MistralConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a Mistral
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a similar configuration to that of the Mistral-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`MistralModel`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 14336):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by mean-pooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         pretraining_tp (`int`, *optional*, defaults to `1`):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports four scaling
+             strategies: linear, dynamic, vanilla_ntk and yarn. The scaling factor must be a float greater than 1. The
+             expected format is `{"type": strategy name, "factor": scaling factor}`; for the `yarn` strategy, an
+             additional `original_max_position_embeddings` field is also read by the modeling code. When using this
+             flag, don't update `max_position_embeddings` to the expected new maximum. See the following thread for
+             more information on how these scaling strategies behave:
+             https://www.reddit.com/r/LocalMistral/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
+             experimental feature, subject to breaking API changes in future versions.
+
+     Example:
+
+     ```python
+     >>> from transformers import MistralModel, MistralConfig
+
+     >>> # Initializing a Mistral-7B style configuration
+     >>> configuration = MistralConfig()
+
+     >>> # Initializing a model from the Mistral-7B style configuration
+     >>> model = MistralModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+     model_type = "mistral"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=14336,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=8,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_scaling=None,
+         rope_theta=10000.0,
+         sliding_window=4096,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.sliding_window = sliding_window
+         # store the documented (experimental) tensor-parallel rank instead of silently dropping it
+         self.pretraining_tp = pretraining_tp
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         self.rope_theta = rope_theta
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict):
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic", "vanilla_ntk", "yarn"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic', 'vanilla_ntk', 'yarn'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
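For reference, a small sketch (not part of the commit) of how this configuration class is exercised with the YaRN scaling used by this checkpoint; the values mirror config.json above, and importing from the local `my_configuration_mistral` module is an assumption:

```python
# Sketch: building the config with YaRN RoPE scaling, matching config.json above.
from my_configuration_mistral import MistralConfig  # local module added in this commit

config = MistralConfig(
    max_position_embeddings=32768,
    rope_scaling={
        "type": "yarn",
        "factor": 8.0,
        # extra field consumed by MistralAttention._init_rope in my_modeling_mistral.py
        "original_max_position_embeddings": 4096,
    },
)
print(config.rope_scaling)

# The validator rejects malformed settings, e.g. an unknown strategy name:
try:
    MistralConfig(rope_scaling={"type": "ntk-by-parts", "factor": 8.0})
except ValueError as e:
    print(e)
```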
my_modeling_mistral.py ADDED
@@ -0,0 +1,1187 @@
+ # coding=utf-8
+ # Copyright 2022 MistralAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ PyTorch Mistral model."""
+ import math
+ from typing import List, Optional, Tuple, Union
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+ from transformers.activations import ACT2FN
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+
+ from transformers.models.mistral.configuration_mistral import MistralConfig
+
+
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "MistralConfig"
+
+
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
+ def _make_causal_mask(
+     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
+ ):
+     """
+     Make causal mask used for bi-directional self-attention.
+     """
+     bsz, tgt_len = input_ids_shape
+     mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
+     mask_cond = torch.arange(mask.size(-1), device=device)
+     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
+     mask = mask.to(dtype)
+
+     if past_key_values_length > 0:
+         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
+
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
+     """
+     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+     """
+     bsz, src_len = mask.size()
+     tgt_len = tgt_len if tgt_len is not None else src_len
+
+     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
+
+     inverted_mask = 1.0 - expanded_mask
+
+     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
+
+
+ def _make_sliding_window_causal_mask(
+     input_ids_shape: torch.Size,
+     dtype: torch.dtype,
+     device: torch.device,
+     past_key_values_length: int = 0,
+     sliding_window: int = 4096,
+ ):
+     """
+     Make causal mask used for sliding window attention
+     """
+     bsz, tgt_len = input_ids_shape
+     tensor = torch.full(
+         (tgt_len, tgt_len),
+         fill_value=1,
+         device=device,
+     )
+     mask = torch.tril(tensor, diagonal=0)
+     # make the mask banded to account for sliding window
+     mask = torch.triu(mask, diagonal=-sliding_window)
+     mask = torch.log(mask).to(dtype)
+     if past_key_values_length > 0:
+         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
+     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
+
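A quick sketch (illustrative, not part of the file) of what the banded mask above produces: `tril` keeps the causal lower triangle, `triu(..., diagonal=-sliding_window)` drops everything older than the window, and `log` turns the 0/1 pattern into additive biases of `-inf`/`0`:

```python
# Sketch: the 0/1 band -> additive-bias trick used by _make_sliding_window_causal_mask.
import torch

tgt_len, window = 5, 2
ones = torch.full((tgt_len, tgt_len), fill_value=1)
band = torch.triu(torch.tril(ones, diagonal=0), diagonal=-window)
print(band)
# tensor([[1, 0, 0, 0, 0],
#         [1, 1, 0, 0, 0],
#         [1, 1, 1, 0, 0],
#         [0, 1, 1, 1, 0],
#         [0, 0, 1, 1, 1]])
bias = torch.log(band)  # 0 where attention is allowed, -inf where it is masked
```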
+
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Mistral
+ class MistralRMSNorm(torch.nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         MistralRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
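A quick numeric check (illustrative only, assuming the class above is in scope): the forward pass is x / sqrt(mean(x²) + eps), scaled by the learned weight, which starts at all ones:

```python
# Sketch: MistralRMSNorm matches x / sqrt(mean(x^2) + eps) at initialization.
import torch

x = torch.randn(2, 3, 8)
norm = MistralRMSNorm(hidden_size=8, eps=1e-6)

expected = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), expected, atol=1e-6)  # weight is initialized to ones
```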
+ # Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Mistral
+ class MistralRotaryEmbedding(torch.nn.Module):
+     def __init__(self, dim, max_position_embeddings=4096, base=10000, device=None):
+         super().__init__()
+
+         self.dim = dim
+         self.max_position_embeddings = max_position_embeddings
+         self.base = base
+         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         # Build here to make `torch.jit.trace` work.
+         self._set_cos_sin_cache(
+             seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+         )
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+         # t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+         t = np.arange(self.max_seq_len_cached, dtype=np.float64)
+         t = torch.tensor(t, device=self.inv_freq.device, dtype=torch.float64)
+
+         # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         freqs = torch.outer(t, self.inv_freq.to(device=t.device).to(t.dtype))
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
+
+     def forward(self, x, seq_len=None):
+         # x: [bs, num_attention_heads, seq_len, head_size]
+         if seq_len > self.max_seq_len_cached:
+             self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+         return (
+             self.cos_cached[:, :, :, ...].to(dtype=x.dtype),
+             self.sin_cached[:, :, :, ...].to(dtype=x.dtype),
+         )
+
+ class MistralLinearScalingRotaryEmbedding(MistralRotaryEmbedding):
+     """MistralRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+     def __init__(self, dim, max_position_embeddings=4096, base=10000, device=None, scaling_factor=1.0):
+         self.scaling_factor = scaling_factor
+         super().__init__(dim, max_position_embeddings, base, device)
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+
+         t = np.arange(self.max_seq_len_cached, dtype=np.float64)
+         t = t / self.scaling_factor
+         t = torch.tensor(t, device=self.inv_freq.device, dtype=torch.float64)
+
+         freqs = torch.outer(t, self.inv_freq.to(device=t.device).to(t.dtype))
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
+
+ class MistralVanillaNTKScalingRotaryEmbedding(MistralRotaryEmbedding):
+
+     def __init__(self, dim, max_position_embeddings=4096, base=10000, device=None, scaling_factor=1.0):
+         self.scaling_factor = scaling_factor
+         super().__init__(dim, max_position_embeddings, base, device)
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+
+         base = self.base * self.scaling_factor ** (self.dim / (self.dim - 2))
+         inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         t = np.arange(self.max_seq_len_cached, dtype=np.float64)
+         t = torch.tensor(t, device=self.inv_freq.device, dtype=torch.float64)
+
+         # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         freqs = torch.outer(t, self.inv_freq.to(device=t.device).to(t.dtype))
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
+
+ class MistralDynamicNTKScalingRotaryEmbedding(MistralRotaryEmbedding):
+     """MistralRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+     def __init__(self, dim, max_position_embeddings=4096, base=10000, device=None, scaling_factor=1.0):
+         self.scaling_factor = scaling_factor
+         super().__init__(dim, max_position_embeddings, base, device)
+
+     def _set_cos_sin_cache(self, seq_len, device, dtype):
+         self.max_seq_len_cached = seq_len
+
+         if seq_len > self.max_position_embeddings:
+             base = self.base * (
+                 (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
+             ) ** (self.dim / (self.dim - 2))
+             inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+             self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+         t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
+         self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
+
+
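The NTK-aware variants above stretch the RoPE wavelengths by rescaling the base rather than the positions. A small illustration (not from the file) of the base rescaling, base' = base * factor^(dim/(dim-2)), for Mistral's head dimension of 128:

```python
# Sketch: "vanilla NTK" rescales the RoPE base; low frequencies stretch, high ones stay close.
base, dim, factor = 10000.0, 128, 8.0
scaled_base = base * factor ** (dim / (dim - 2))  # 10000 * 8^(128/126)
print(round(scaled_base))  # roughly 83,000

# The dynamic variant recomputes the effective factor from the current sequence length:
seq_len, max_pos = 16384, 4096
dyn = base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2))
```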
+ # Inverse dim formula to find dim based on number of rotations
+ def _yarn_find_correction_dim(num_rotations, dim, base=10000, max_position_embeddings=4096):
+     return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (2 * math.log(base))
+
+ # Find dim range bounds based on rotations
+ def _yarn_find_correction_range(low_rot, high_rot, dim, base=10000, max_position_embeddings=4096):
+     low = math.floor(_yarn_find_correction_dim(
+         low_rot, dim, base, max_position_embeddings))
+     high = math.ceil(_yarn_find_correction_dim(
+         high_rot, dim, base, max_position_embeddings))
+     return max(low, 0), min(high, dim - 1)  # Clamp values just in case
+
+ def _yarn_linear_ramp_mask(min, max, dim):
+     if min == max:
+         max += 0.001  # Prevent singularity
+
+     linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
+     ramp_func = torch.clamp(linear_func, 0, 1)
+     return ramp_func
+
+ def _yarn_get_mscale(scale=1):
+     if scale <= 1:
+         return 1.0
+     return 0.07 * math.log(scale) + 1.0
+
+
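A brief numeric sketch (not part of the file) of what these helpers return for the dimensions used here, with the defaults above (beta_fast=32, beta_slow=1, head_dim=128, base=10000, original context 4096):

```python
# Sketch: YaRN extrapolates the fast-rotating dims and interpolates the slow ones;
# the linear ramp blends between the two regimes over roughly dims 20..46.
low, high = _yarn_find_correction_range(32, 1, 128)
print(low, high)              # 20 46
print(_yarn_get_mscale(8.0))  # ~1.1456: attention-magnitude correction for factor 8
```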
+ class MistralYaRNScaledRotaryEmbedding(torch.nn.Module):
+     def __init__(self, dim, max_position_embeddings=4096, base=10000, scale=1, original_max_position_embeddings=4096, extrapolation_factor=1, attn_factor=1, beta_fast=32, beta_slow=1, finetuned=False, device=None):
+         super().__init__()
+
+         self.dim = dim
+         self.max_position_embeddings = max_position_embeddings
+         self.base = base
+         self.scale = scale
+         self.original_max_position_embeddings = original_max_position_embeddings
+         self.extrapolation_factor = extrapolation_factor
+         self.attn_factor = attn_factor
+         self.beta_fast = beta_fast
+         self.beta_slow = beta_slow
+
+         # self.yarn(device)
+         self.revised_yarn(device)
+
+         # Build here to make `torch.jit.trace` work.
+         self.max_seq_len_cached = max_position_embeddings
+
+         t = np.arange(self.max_seq_len_cached, dtype=np.float64)
+         t = torch.tensor(t, device=self.inv_freq.device, dtype=torch.float64)
+         freqs = torch.outer(t, self.inv_freq.to(device=t.device).to(t.dtype))
+         # t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
+         # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         dtype = torch.get_default_dtype()
+
+         self.register_buffer("cos_cached", (emb.cos() * self.mscale)[None, None, :, :].to(dtype), persistent=False)
+         self.register_buffer("sin_cached", (emb.sin() * self.mscale)[None, None, :, :].to(dtype), persistent=False)
+
+     def forward(self, x, seq_len=None):
+         # x: [bs, num_attention_heads, seq_len, head_size]
+         # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
+         if seq_len > self.max_seq_len_cached:
+             print("*****notice******")
+             self.max_seq_len_cached = seq_len
+
+             t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
+             freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+             # Different from paper, but it uses a different permutation in order to obtain the same calculation
+             emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
+
+             self.register_buffer("cos_cached", (emb.cos() * self.mscale)[None, None, :, :].to(x.dtype), persistent=False)
+             self.register_buffer("sin_cached", (emb.sin() * self.mscale)[None, None, :, :].to(x.dtype), persistent=False)
+         return (
+             self.cos_cached[:, :, :, ...].to(dtype=x.dtype),
+             self.sin_cached[:, :, :, ...].to(dtype=x.dtype),
+         )
+
+     def yarn(self, device):
+         pos_freqs = self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)
+         inv_freq_extrapolation = 1.0 / pos_freqs
+         inv_freq_interpolation = 1.0 / (self.scale * pos_freqs)
+
+         low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, self.original_max_position_embeddings)
+         inv_freq_mask = (1 - _yarn_linear_ramp_mask(low, high, self.dim // 2).float().to(device)) * self.extrapolation_factor  # Get n-d rotational scaling corrected for extrapolation
+         inv_freq = inv_freq_interpolation * (1 - inv_freq_mask) + inv_freq_extrapolation * inv_freq_mask
+
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.mscale = float(_yarn_get_mscale(self.scale) * self.attn_factor)  # Get n-d magnitude scaling corrected for interpolation
+
+     def revised_yarn(self, device):
+         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
+
+         low, high = _yarn_find_correction_range(self.beta_fast, self.beta_slow, self.dim, self.base, self.original_max_position_embeddings)
+         inv_freq_mask = (1 - _yarn_linear_ramp_mask(low, high, self.dim // 2).float().to(device)) * self.extrapolation_factor
+
+         inv_freq = inv_freq / ((1 - inv_freq_mask) * self.scale + inv_freq_mask)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.mscale = float(_yarn_get_mscale(self.scale) * self.attn_factor)
+
+
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
+     # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
+     cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
+     sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
+     cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+     sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
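For intuition (illustrative only, assuming the classes above are in scope): applying cos/sin this way is a pairwise rotation of each (x1, x2) slice, so it preserves vector norms while encoding position:

```python
# Sketch: apply_rotary_pos_emb is a pure rotation -- norms are unchanged.
import torch

bs, heads, seq_len, head_dim = 1, 2, 6, 8
rope = MistralRotaryEmbedding(head_dim, max_position_embeddings=seq_len)
q = torch.randn(bs, heads, seq_len, head_dim)
k = torch.randn(bs, heads, seq_len, head_dim)
position_ids = torch.arange(seq_len)[None, :]

cos, sin = rope(q, seq_len=seq_len)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)
```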
+ class MistralMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
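A quick shape check (illustrative) of the GQA expansion above, with Mistral's 8 KV heads repeated to match 32 query heads (n_rep = 32 / 8 = 4):

```python
# Sketch: repeat_kv expands 8 KV heads to 32 so they line up with the query heads.
import torch

kv = torch.randn(2, 8, 16, 128)  # (batch, num_key_value_heads, seqlen, head_dim)
expanded = repeat_kv(kv, n_rep=4)
print(expanded.shape)  # torch.Size([2, 32, 16, 128])
assert torch.equal(expanded[:, 0], expanded[:, 3])  # output heads 0-3 share KV head 0
```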
+ class MistralAttention(nn.Module):
+     """Multi-headed attention from the 'Attention Is All You Need' paper, modified to use sliding window attention
+     as in Longformer and "Generating Long Sequences with Sparse Transformers"."""
+
+     def __init__(self, config: MistralConfig):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+         self.num_key_value_heads = config.num_key_value_heads
+         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+         self.max_position_embeddings = config.max_position_embeddings
+
+         if (self.head_dim * self.num_heads) != self.hidden_size:
+             raise ValueError(
+                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+                 f" and `num_heads`: {self.num_heads})."
+             )
+         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
+         self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+         self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
+         self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+         self._init_rope()
+
+     def _init_rope(self):
+         if self.config.rope_scaling is None:
+             self.rotary_emb = MistralRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
+         else:
+             scaling_type = self.config.rope_scaling["type"]
+             scaling_factor = self.config.rope_scaling["factor"]
+             if scaling_type == "linear":
+                 self.rotary_emb = MistralLinearScalingRotaryEmbedding(
+                     self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor
+                 )
+             elif scaling_type == "dynamic":
+                 self.rotary_emb = MistralDynamicNTKScalingRotaryEmbedding(
+                     self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor
+                 )
+             elif scaling_type == "vanilla_ntk":
+                 self.rotary_emb = MistralVanillaNTKScalingRotaryEmbedding(
+                     self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor
+                 )
+             elif scaling_type == "yarn":
+                 original_max_position_embeddings = self.config.rope_scaling["original_max_position_embeddings"]
+                 self.rotary_emb = MistralYaRNScaledRotaryEmbedding(
+                     self.head_dim, max_position_embeddings=self.max_position_embeddings, scale=scaling_factor, original_max_position_embeddings=original_max_position_embeddings
+                 )
+             else:
+                 raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
+
+     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         bsz, q_len, _ = hidden_states.size()
+
+         query_states = self.q_proj(hidden_states)
+         key_states = self.k_proj(hidden_states)
+         value_states = self.v_proj(hidden_states)
+
+         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+         kv_seq_len = key_states.shape[-2]
+         if past_key_value is not None:
+             kv_seq_len += past_key_value[0].shape[-2]
+         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+         have_past_key_value = past_key_value is not None
+         if past_key_value is not None:
+             # reuse k, v, self_attention
+             key_states = torch.cat([past_key_value[0], key_states], dim=2)
+             value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+         past_key_value = (key_states, value_states) if use_cache else None
+
+         # repeat k/v heads if n_kv_heads < n_heads
+         key_states = repeat_kv(key_states, self.num_key_value_groups)
+         value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+         # xformers is used unconditionally for the prefill; the eager path below is kept for
+         # decoding with a KV cache (and for returning attention weights).
+         use_xformer = True
+
+         if not use_xformer or have_past_key_value:
+             attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+             if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
+                 raise ValueError(
+                     f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
+                     f" {attn_weights.size()}"
+                 )
+
+             if attention_mask is not None:
+                 if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+                     raise ValueError(
+                         f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+                     )
+                 attn_weights = attn_weights + attention_mask
+
+             # upcast attention to fp32
+             attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+             attn_output = torch.matmul(attn_weights, value_states)
+
+             if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+                 raise ValueError(
+                     f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+                     f" {attn_output.size()}"
+                 )
+
+             attn_output = attn_output.transpose(1, 2).contiguous()
+             attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+         else:
+             import xformers.ops as xops
+             attn_weights = None
+             # Note: this path applies a plain causal bias and ignores any padding or
+             # sliding-window information carried by `attention_mask`.
+             # attn_bias = attention_mask.expand(-1, self.num_heads, -1, -1)
+             attn_bias = xops.LowerTriangularMask()
+             attn_output = xops.memory_efficient_attention(
+                 query_states.transpose(1, 2), key_states.transpose(1, 2), value_states.transpose(1, 2),
+                 attn_bias=attn_bias,
+             ).reshape(bsz, q_len, self.hidden_size)
+
+         attn_output = self.o_proj(attn_output)
+
+         if not output_attentions:
+             attn_weights = None
+
+         return attn_output, attn_weights, past_key_value
+
+
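For reference (illustrative only; requires the optional `xformers` package and, in practice, a CUDA device), the memory-efficient kernel used above agrees with the eager softmax-attention path when the bias is purely causal:

```python
# Sketch: xformers' causal memory_efficient_attention matches naive causal attention.
import math

import torch

try:
    import xformers.ops as xops
except ImportError:
    xops = None

if xops is not None and torch.cuda.is_available():
    bsz, heads, seq, dim = 1, 4, 16, 64
    q, k, v = (torch.randn(bsz, heads, seq, dim, device="cuda") for _ in range(3))

    # eager reference with an explicit causal mask
    scores = q @ k.transpose(-2, -1) / math.sqrt(dim)
    causal = torch.triu(torch.ones(seq, seq, dtype=torch.bool, device="cuda"), 1)
    ref = scores.masked_fill(causal, float("-inf")).softmax(-1) @ v

    # xformers expects (batch, seq, heads, dim), hence the transposes in forward()
    out = xops.memory_efficient_attention(
        q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2),
        attn_bias=xops.LowerTriangularMask(),
    ).transpose(1, 2)
    assert torch.allclose(out, ref, atol=1e-4)
```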
+ class MistralDecoderLayer(nn.Module):
+     def __init__(self, config: MistralConfig):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.self_attn = MistralAttention(config=config)
+         self.mlp = MistralMLP(config)
+         self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+         """
+         Args:
+             hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+             attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+                 `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+             output_attentions (`bool`, *optional*):
+                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+                 returned tensors for more detail.
+             use_cache (`bool`, *optional*):
+                 If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+                 (see `past_key_values`).
+             past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+         """
+
+         residual = hidden_states
+
+         hidden_states = self.input_layernorm(hidden_states)
+
+         # Self Attention
+         hidden_states, self_attn_weights, present_key_value = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_value=past_key_value,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         outputs = (hidden_states,)
+
+         if output_attentions:
+             outputs += (self_attn_weights,)
+
+         if use_cache:
+             outputs += (present_key_value,)
+
+         return outputs
+
+
+ MISTRAL_START_DOCSTRING = r"""
+     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+     etc.)
+
+     This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+     Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+     and behavior.
+
+     Parameters:
+         config ([`MistralConfig`]):
+             Model configuration class with all the parameters of the model. Initializing with a config file does not
+             load the weights associated with the model, only the configuration. Check out the
+             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+
+ @add_start_docstrings(
+     "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
+     MISTRAL_START_DOCSTRING,
+ )
+ class MistralPreTrainedModel(PreTrainedModel):
+     config_class = MistralConfig
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["MistralDecoderLayer"]
+     _skip_keys_device_placement = "past_key_values"
+
+     def _init_weights(self, module):
+         std = self.config.initializer_range
+         if isinstance(module, nn.Linear):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=std)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, MistralModel):
+             module.gradient_checkpointing = value
+
+
+ MISTRAL_INPUTS_DOCSTRING = r"""
+     Args:
+         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+             Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+             it.
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             [What are input IDs?](../glossary#input-ids)
+         attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+             - 1 for tokens that are **not masked**,
+             - 0 for tokens that are **masked**.
+
+             [What are attention masks?](../glossary#attention-mask)
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+             `past_key_values`).
+
+             If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+             and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+             information on the default strategy.
+
+             - 1 indicates the head is **not masked**,
+             - 0 indicates the head is **masked**.
+         position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+             config.n_positions - 1]`.
+
+             [What are position IDs?](../glossary#position-ids)
+         past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+             Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+             `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+             `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+             blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+             If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+             don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+             `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+         inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+             is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+             model's internal embedding lookup matrix.
+         use_cache (`bool`, *optional*):
+             If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+             `past_key_values`).
+         output_attentions (`bool`, *optional*):
+             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+             tensors for more detail.
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+             more detail.
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
+ @add_start_docstrings(
+     "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
+     MISTRAL_START_DOCSTRING,
+ )
+ class MistralModel(MistralPreTrainedModel):
+     """
+     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MistralDecoderLayer`]
+
+     Args:
+         config: MistralConfig
+     """
+
+     def __init__(self, config: MistralConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([MistralDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+         self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+         self.gradient_checkpointing = False
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.embed_tokens = value
+
+     # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
+     def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length, sliding_window):
+         # create causal mask
+         # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+         combined_attention_mask = None
+         if input_shape[-1] > 1:
+
+             if sliding_window is not None:
+                 combined_attention_mask = _make_sliding_window_causal_mask(
+                     input_shape,
+                     inputs_embeds.dtype,
+                     device=inputs_embeds.device,
+                     past_key_values_length=past_key_values_length,
+                     sliding_window=sliding_window,
+                 )
+             else:
+                 combined_attention_mask = _make_causal_mask(
+                     input_shape,
+                     inputs_embeds.dtype,
+                     device=inputs_embeds.device,
+                     past_key_values_length=past_key_values_length,
+                 )
+
+         if attention_mask is not None:
+             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+             expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
+                 inputs_embeds.device
+             )
+             combined_attention_mask = (
+                 expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
+             )
+
+         return combined_attention_mask
+
+     @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # retrieve input_ids and inputs_embeds
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+         elif input_ids is not None:
+             batch_size, seq_length = input_ids.shape
+         elif inputs_embeds is not None:
+             batch_size, seq_length, _ = inputs_embeds.shape
+         else:
+             raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+         seq_length_with_past = seq_length
+         past_key_values_length = 0
+
+         if past_key_values is not None:
+             past_key_values_length = past_key_values[0][0].shape[2]
+             seq_length_with_past = seq_length_with_past + past_key_values_length
+
+         if position_ids is None:
+             device = input_ids.device if input_ids is not None else inputs_embeds.device
+             position_ids = torch.arange(
+                 past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+             )
+             position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
+         else:
+             position_ids = position_ids.view(-1, seq_length).long()
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+         # embed positions
+         # if attention_mask is None:
+         #     attention_mask = torch.ones(
+         #         (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+         #     )
+
+         # The 4D mask is only built when an explicit attention_mask is passed; with no mask, the
+         # attention module's default xformers path applies its own causal bias.
+         if attention_mask is not None:
+             attention_mask = self._prepare_decoder_attention_mask(
+                 attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length,
+                 sliding_window=self.config.sliding_window if hasattr(self.config, "sliding_window") else None,
+             )
+         # elif 0 in attention_mask:
+         #     padding_mask = attention_mask
+         # if (
+         #     padding_mask is not None
+         #     and hasattr(self.config, "_flash_attn_2_enabled")
+         #     and self.config._flash_attn_2_enabled
+         # ):
+         #     is_padding_right = padding_mask[:, -1].sum().item() != batch_size
+         #     if is_padding_right:
+         #         raise ValueError(
+         #             "You are attempting to perform batched generation with padding_side='right'"
+         #             " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to "
+         #             " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
+         #         )
+
+         #     attention_mask = self._prepare_decoder_attention_mask(
+         #         attention_mask,
+         #         (batch_size, seq_length),
+         #         inputs_embeds,
+         #         past_key_values_length,
+         #         sliding_window=self.config.sliding_window if hasattr(self.config, "sliding_window") else None,
+         #     )
+         hidden_states = inputs_embeds
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                 )
+                 use_cache = False
+
+         # decoder layers
+         all_hidden_states = () if output_hidden_states else None
+         all_self_attns = () if output_attentions else None
+         next_decoder_cache = () if use_cache else None
+
+         for idx, decoder_layer in enumerate(self.layers):
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+             if self.gradient_checkpointing and self.training:
+
+                 def create_custom_forward(module):
+                     def custom_forward(*inputs):
+                         # None for past_key_value
+                         return module(*inputs, output_attentions, None)
+
+                     return custom_forward
+
+                 layer_outputs = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(decoder_layer),
+                     hidden_states,
+                     attention_mask,
+                     position_ids,
+                     None,
+                 )
+             else:
+                 layer_outputs = decoder_layer(
+                     hidden_states,
+                     attention_mask=attention_mask,
+                     position_ids=position_ids,
+                     past_key_value=past_key_value,
+                     output_attentions=output_attentions,
+                     use_cache=use_cache,
+                 )
+
+             hidden_states = layer_outputs[0]
+
+             if use_cache:
+                 next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+             if output_attentions:
+                 all_self_attns += (layer_outputs[1],)
+
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         next_cache = next_decoder_cache if use_cache else None
+         if not return_dict:
+             return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=next_cache,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attns,
+         )
+
+
+ class MistralForCausalLM(MistralPreTrainedModel):
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = MistralModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
+     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         r"""
+         Args:
+             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                 (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, MistralForCausalLM
+
+         >>> model = MistralForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         hidden_states = outputs[0]
+
+         logits = self.lm_head(hidden_states)
+         logits = logits.float()
+
+         loss = None
+         if labels is not None:
+             # Shift so that tokens < n predict n
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss()
+             shift_logits = shift_logits.view(-1, self.config.vocab_size)
+             shift_labels = shift_labels.view(-1)
+             # Enable model parallelism
+             shift_labels = shift_labels.to(shift_logits.device)
+             loss = loss_fct(shift_logits, shift_labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+     def prepare_inputs_for_generation(
+         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+     ):
+         if past_key_values:
+             input_ids = input_ids[:, -1:]
+
+         position_ids = kwargs.get("position_ids", None)
+         if attention_mask is not None and position_ids is None:
+             # create position_ids on the fly for batch generation
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -1].unsqueeze(-1)
+
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {"inputs_embeds": inputs_embeds}
+         else:
+             model_inputs = {"input_ids": input_ids}
+
+         model_inputs.update(
+             {
+                 "position_ids": position_ids,
+                 "past_key_values": past_key_values,
+                 "use_cache": kwargs.get("use_cache"),
+                 "attention_mask": attention_mask,
+             }
+         )
+         return model_inputs
+
+     @staticmethod
+     def _reorder_cache(past_key_values, beam_idx):
+         reordered_past = ()
+         for layer_past in past_key_values:
+             reordered_past += (
+                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+             )
+         return reordered_past
+
+
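Putting the two files together (illustrative only; the checkpoint path is a placeholder and importing the local module directly is an assumption), the classes above can stand in for the stock `transformers` implementation when loading this checkpoint with its YaRN-scaled positions:

```python
# Sketch: loading this checkpoint with the custom modeling code added in this commit.
import torch
from transformers import AutoTokenizer

from my_modeling_mistral import MistralForCausalLM  # local module from this repo

model = MistralForCausalLM.from_pretrained(
    "path/to/this/checkpoint",     # placeholder
    torch_dtype=torch.float16,
    device_map="auto",             # requires the `accelerate` package
)
tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint")

inputs = tokenizer("Long-context test:", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```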
1068
+ @add_start_docstrings(
1069
+ """
1070
+ The Mistral Model transformer with a sequence classification head on top (linear layer).
1071
+
1072
+ [`MistralForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1073
+ (e.g. GPT-2) do.
1074
+
1075
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1076
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1077
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1078
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1079
+ each row of the batch).
1080
+ """,
+     MISTRAL_START_DOCSTRING,
+ )
+ class MistralForSequenceClassification(MistralPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.model = MistralModel(config)
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size = input_ids.shape[0]
+         else:
+             batch_size = inputs_embeds.shape[0]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+         if self.config.pad_token_id is None:
+             sequence_lengths = -1
+         else:
+             if input_ids is not None:
+                 sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
+                     logits.device
+                 )
+             else:
+                 sequence_lengths = -1
+
+         pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+         loss = None
+         if labels is not None:
+             labels = labels.to(logits.device)
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(pooled_logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(pooled_logits, labels)
+         if not return_dict:
+             output = (pooled_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
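Note on the loss block in `forward` above: position i of the logits is trained to predict token i+1 of the labels, which is why both tensors are shifted by one before the flattened cross-entropy. A minimal self-contained sketch of that computation (illustrative only, not part of the committed file; shapes are toy values):

import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, vocab_size = 2, 8, 32001  # 32001 = base vocab plus the appended [PAD] token in this checkpoint
logits = torch.randn(batch, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch, seq_len))

# Shift so that tokens < n predict n, mirroring MistralForCausalLM.forward above
shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)
print(loss.item())  # for random logits this lands near ln(vocab_size)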
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
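Two things worth noticing in this file: `pad_token` is a newly added `[PAD]` token rather than one of the original SentencePiece pieces, and `unk_token` is mapped to `</s>`. A quick illustrative check (the repo path below is a placeholder, not something defined by this commit):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # placeholder local path
print(tok.pad_token, tok.pad_token_id)  # expected: [PAD] 32000, per tokenizer_config.json below
print(tok.unk_token)                    # expected: </s>, as declared above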
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
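tokenizer.model is tracked with Git LFS, so the diff records only the pointer file (spec version, sha256 oid, byte size) rather than the binary SentencePiece model itself. An illustrative snippet for reading such a pointer (the inline text is the pointer above, verbatim):

# Parse a Git LFS pointer into its fields; each line is "key value".
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
size 493443"""
meta = dict(line.split(" ", 1) for line in pointer.splitlines())
print(meta["oid"], meta["size"])  # content hash and byte count of the real tokenizer.model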
tokenizer_config.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "</s>",
+   "use_default_system_prompt": true,
+   "use_fast": true
+ }
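The "padding_side": "left" setting matters for decoder-only generation: with left padding, the final position of every row in a batch holds a real token, so generation continues from actual content instead of [PAD]. An illustrative sketch (placeholder path again):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # placeholder local path
batch = tok(["short prompt", "a noticeably longer prompt"], padding=True, return_tensors="pt")
print(batch["input_ids"])       # [PAD] ids (32000) appear on the left of the shorter row
print(batch["attention_mask"])  # zeros mark the padding, ones the real tokens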
trainer_state.json ADDED
@@ -0,0 +1,1379 @@
+ {
+   "best_metric": 2.0878255367279053,
+   "best_model_checkpoint": "path_to_your_model_and_data/pose/results/2k-32k-yarn/checkpoint-1000",
+   "epoch": 0.6896551724137931,
+   "eval_steps": 50,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 1.2041199826559246e-05,
+       "loss": 1.6869,
+       "step": 5
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 1.9084850188786497e-05,
+       "loss": 1.6045,
+       "step": 10
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 1.993939393939394e-05,
+       "loss": 1.5365,
+       "step": 15
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 1.985858585858586e-05,
+       "loss": 1.6548,
+       "step": 20
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 1.975757575757576e-05,
+       "loss": 1.7008,
+       "step": 25
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 1.9656565656565658e-05,
+       "loss": 1.6702,
+       "step": 30
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 1.9555555555555557e-05,
+       "loss": 1.6943,
+       "step": 35
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.9454545454545457e-05,
+       "loss": 1.6775,
+       "step": 40
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.9353535353535356e-05,
+       "loss": 1.7281,
+       "step": 45
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.9252525252525252e-05,
+       "loss": 1.6746,
+       "step": 50
+     },
+     {
+       "epoch": 0.03,
+       "eval_loss": 2.0998010635375977,
+       "eval_runtime": 127.4969,
+       "eval_samples_per_second": 0.784,
+       "eval_steps_per_second": 0.102,
+       "step": 50
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 1.9151515151515152e-05,
+       "loss": 1.7754,
+       "step": 55
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 1.905050505050505e-05,
+       "loss": 1.7094,
+       "step": 60
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 1.894949494949495e-05,
+       "loss": 1.6483,
+       "step": 65
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 1.886868686868687e-05,
+       "loss": 1.7341,
+       "step": 70
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 1.876767676767677e-05,
+       "loss": 1.6221,
+       "step": 75
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 1.866666666666667e-05,
+       "loss": 1.5681,
+       "step": 80
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 1.8565656565656568e-05,
+       "loss": 1.6668,
+       "step": 85
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 1.8464646464646464e-05,
+       "loss": 1.6743,
+       "step": 90
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 1.8363636363636367e-05,
+       "loss": 1.7875,
+       "step": 95
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 1.8262626262626263e-05,
+       "loss": 1.7397,
+       "step": 100
+     },
+     {
+       "epoch": 0.07,
+       "eval_loss": 2.1947836875915527,
+       "eval_runtime": 127.4557,
+       "eval_samples_per_second": 0.785,
+       "eval_steps_per_second": 0.102,
+       "step": 100
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 1.8161616161616163e-05,
+       "loss": 1.6912,
+       "step": 105
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 1.8060606060606062e-05,
+       "loss": 1.7995,
+       "step": 110
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 1.795959595959596e-05,
+       "loss": 1.6217,
+       "step": 115
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 1.785858585858586e-05,
+       "loss": 1.7081,
+       "step": 120
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 1.775757575757576e-05,
+       "loss": 1.7041,
+       "step": 125
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 1.765656565656566e-05,
+       "loss": 1.6235,
+       "step": 130
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 1.7555555555555556e-05,
+       "loss": 1.6583,
+       "step": 135
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.7454545454545456e-05,
+       "loss": 1.7467,
+       "step": 140
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.7353535353535355e-05,
+       "loss": 1.6531,
+       "step": 145
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.7252525252525255e-05,
+       "loss": 1.7272,
+       "step": 150
+     },
+     {
+       "epoch": 0.1,
+       "eval_loss": 2.1845450401306152,
+       "eval_runtime": 127.0346,
+       "eval_samples_per_second": 0.787,
+       "eval_steps_per_second": 0.102,
+       "step": 150
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 1.715151515151515e-05,
+       "loss": 1.6193,
+       "step": 155
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 1.7050505050505054e-05,
+       "loss": 1.6174,
+       "step": 160
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 1.694949494949495e-05,
+       "loss": 1.665,
+       "step": 165
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 1.684848484848485e-05,
+       "loss": 1.7045,
+       "step": 170
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 1.674747474747475e-05,
+       "loss": 1.7124,
+       "step": 175
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 1.6646464646464648e-05,
+       "loss": 1.8402,
+       "step": 180
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 1.6545454545454548e-05,
+       "loss": 1.6673,
+       "step": 185
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 1.6444444444444444e-05,
+       "loss": 1.7193,
+       "step": 190
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 1.6343434343434346e-05,
+       "loss": 1.6832,
+       "step": 195
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 1.6242424242424243e-05,
+       "loss": 1.6866,
+       "step": 200
+     },
+     {
+       "epoch": 0.14,
+       "eval_loss": 2.1831867694854736,
+       "eval_runtime": 127.2543,
+       "eval_samples_per_second": 0.786,
+       "eval_steps_per_second": 0.102,
+       "step": 200
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 1.6141414141414142e-05,
+       "loss": 1.6387,
+       "step": 205
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 1.604040404040404e-05,
+       "loss": 1.7822,
+       "step": 210
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 1.593939393939394e-05,
+       "loss": 1.6705,
+       "step": 215
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 1.585858585858586e-05,
+       "loss": 1.6592,
+       "step": 220
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 1.575757575757576e-05,
+       "loss": 1.6256,
+       "step": 225
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 1.565656565656566e-05,
+       "loss": 1.6946,
+       "step": 230
+     },
+     {
+       "epoch": 0.16,
+       "learning_rate": 1.555555555555556e-05,
+       "loss": 1.6859,
+       "step": 235
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.5454545454545454e-05,
+       "loss": 1.674,
+       "step": 240
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.5353535353535354e-05,
+       "loss": 1.7687,
+       "step": 245
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.5252525252525255e-05,
+       "loss": 1.6456,
+       "step": 250
+     },
+     {
+       "epoch": 0.17,
+       "eval_loss": 2.1776535511016846,
+       "eval_runtime": 127.5296,
+       "eval_samples_per_second": 0.784,
+       "eval_steps_per_second": 0.102,
+       "step": 250
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 1.5151515151515153e-05,
+       "loss": 1.6896,
+       "step": 255
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 1.505050505050505e-05,
+       "loss": 1.7209,
+       "step": 260
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 1.4949494949494952e-05,
+       "loss": 1.66,
+       "step": 265
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 1.484848484848485e-05,
+       "loss": 1.6708,
+       "step": 270
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 1.4747474747474747e-05,
+       "loss": 1.6432,
+       "step": 275
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 1.4646464646464649e-05,
+       "loss": 1.6676,
+       "step": 280
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 1.4545454545454546e-05,
+       "loss": 1.6768,
+       "step": 285
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 1.4444444444444446e-05,
+       "loss": 1.7592,
+       "step": 290
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 1.4343434343434344e-05,
+       "loss": 1.6192,
+       "step": 295
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 1.4242424242424245e-05,
+       "loss": 1.5604,
+       "step": 300
+     },
+     {
+       "epoch": 0.21,
+       "eval_loss": 2.1859655380249023,
+       "eval_runtime": 127.9048,
+       "eval_samples_per_second": 0.782,
+       "eval_steps_per_second": 0.102,
+       "step": 300
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 1.4141414141414143e-05,
+       "loss": 1.6379,
+       "step": 305
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 1.404040404040404e-05,
+       "loss": 1.6012,
+       "step": 310
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 1.3939393939393942e-05,
+       "loss": 1.7011,
+       "step": 315
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 1.383838383838384e-05,
+       "loss": 1.6168,
+       "step": 320
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 1.3737373737373739e-05,
+       "loss": 1.6507,
+       "step": 325
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 1.3636363636363637e-05,
+       "loss": 1.7676,
+       "step": 330
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 1.3535353535353538e-05,
+       "loss": 1.6707,
+       "step": 335
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 1.3434343434343436e-05,
+       "loss": 1.6062,
+       "step": 340
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 1.3333333333333333e-05,
+       "loss": 1.7136,
+       "step": 345
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 1.3232323232323234e-05,
+       "loss": 1.661,
+       "step": 350
+     },
+     {
+       "epoch": 0.24,
+       "eval_loss": 2.189072370529175,
+       "eval_runtime": 127.1833,
+       "eval_samples_per_second": 0.786,
+       "eval_steps_per_second": 0.102,
+       "step": 350
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 1.3131313131313132e-05,
+       "loss": 1.6766,
+       "step": 355
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 1.3030303030303032e-05,
+       "loss": 1.6497,
+       "step": 360
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 1.2929292929292931e-05,
+       "loss": 1.6738,
+       "step": 365
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 1.2828282828282829e-05,
+       "loss": 1.6673,
+       "step": 370
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 1.2727272727272728e-05,
+       "loss": 1.6767,
+       "step": 375
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 1.2626262626262626e-05,
+       "loss": 1.7335,
+       "step": 380
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 1.2525252525252527e-05,
+       "loss": 1.5885,
+       "step": 385
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 1.2424242424242425e-05,
+       "loss": 1.5992,
+       "step": 390
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 1.2323232323232323e-05,
+       "loss": 1.6115,
+       "step": 395
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 1.2222222222222224e-05,
+       "loss": 1.7397,
+       "step": 400
+     },
+     {
+       "epoch": 0.28,
+       "eval_loss": 2.1878244876861572,
+       "eval_runtime": 127.871,
+       "eval_samples_per_second": 0.782,
+       "eval_steps_per_second": 0.102,
+       "step": 400
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 1.2121212121212122e-05,
+       "loss": 1.6486,
+       "step": 405
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 1.2020202020202021e-05,
+       "loss": 1.6822,
+       "step": 410
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 1.191919191919192e-05,
+       "loss": 1.6529,
+       "step": 415
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 1.181818181818182e-05,
+       "loss": 1.6155,
+       "step": 420
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 1.1717171717171718e-05,
+       "loss": 1.6216,
+       "step": 425
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 1.1616161616161616e-05,
+       "loss": 1.5922,
+       "step": 430
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 1.1515151515151517e-05,
+       "loss": 1.7198,
+       "step": 435
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 1.1414141414141415e-05,
+       "loss": 1.5924,
+       "step": 440
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 1.1313131313131314e-05,
+       "loss": 1.6722,
+       "step": 445
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 1.1212121212121212e-05,
+       "loss": 1.5975,
+       "step": 450
+     },
+     {
+       "epoch": 0.31,
+       "eval_loss": 2.185619592666626,
+       "eval_runtime": 127.8889,
+       "eval_samples_per_second": 0.782,
+       "eval_steps_per_second": 0.102,
+       "step": 450
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 1.1111111111111113e-05,
+       "loss": 1.6218,
+       "step": 455
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 1.1010101010101011e-05,
+       "loss": 1.613,
+       "step": 460
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 1.0909090909090909e-05,
+       "loss": 1.7023,
+       "step": 465
+     },
+     {
+       "epoch": 0.32,
+       "learning_rate": 1.080808080808081e-05,
+       "loss": 1.6097,
+       "step": 470
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.0707070707070708e-05,
+       "loss": 1.5922,
+       "step": 475
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.0606060606060606e-05,
+       "loss": 1.6736,
+       "step": 480
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.0505050505050507e-05,
+       "loss": 1.6033,
+       "step": 485
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 1.0404040404040405e-05,
+       "loss": 1.6552,
+       "step": 490
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 1.0303030303030304e-05,
+       "loss": 1.6533,
+       "step": 495
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 1.0202020202020202e-05,
+       "loss": 1.6899,
+       "step": 500
+     },
+     {
+       "epoch": 0.34,
+       "eval_loss": 2.1731319427490234,
+       "eval_runtime": 127.9869,
+       "eval_samples_per_second": 0.781,
+       "eval_steps_per_second": 0.102,
+       "step": 500
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 1.0101010101010103e-05,
+       "loss": 1.6272,
+       "step": 505
+     },
+     {
+       "epoch": 0.35,
+       "learning_rate": 1e-05,
+       "loss": 1.6105,
+       "step": 510
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 9.8989898989899e-06,
+       "loss": 1.522,
+       "step": 515
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 9.797979797979798e-06,
+       "loss": 1.6022,
+       "step": 520
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 9.696969696969698e-06,
+       "loss": 1.6157,
+       "step": 525
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 9.595959595959597e-06,
+       "loss": 1.7323,
+       "step": 530
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 9.494949494949497e-06,
+       "loss": 1.5535,
+       "step": 535
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 9.393939393939396e-06,
+       "loss": 1.6378,
+       "step": 540
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 9.292929292929294e-06,
+       "loss": 1.5394,
+       "step": 545
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 9.191919191919193e-06,
+       "loss": 1.6862,
+       "step": 550
+     },
+     {
+       "epoch": 0.38,
+       "eval_loss": 2.1638994216918945,
+       "eval_runtime": 127.6725,
+       "eval_samples_per_second": 0.783,
+       "eval_steps_per_second": 0.102,
+       "step": 550
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 9.090909090909091e-06,
+       "loss": 1.5748,
+       "step": 555
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 8.98989898989899e-06,
+       "loss": 1.6246,
+       "step": 560
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 8.888888888888888e-06,
+       "loss": 1.6118,
+       "step": 565
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 8.787878787878788e-06,
+       "loss": 1.5606,
+       "step": 570
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 8.686868686868687e-06,
+       "loss": 1.5704,
+       "step": 575
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 8.585858585858587e-06,
+       "loss": 1.6683,
+       "step": 580
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 8.484848484848486e-06,
+       "loss": 1.6215,
+       "step": 585
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 8.383838383838384e-06,
+       "loss": 1.6727,
+       "step": 590
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 8.282828282828283e-06,
+       "loss": 1.5615,
+       "step": 595
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 8.181818181818183e-06,
+       "loss": 1.6431,
+       "step": 600
+     },
+     {
+       "epoch": 0.41,
+       "eval_loss": 2.1520442962646484,
+       "eval_runtime": 127.6384,
+       "eval_samples_per_second": 0.783,
+       "eval_steps_per_second": 0.102,
+       "step": 600
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 8.08080808080808e-06,
+       "loss": 1.5967,
+       "step": 605
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 7.97979797979798e-06,
+       "loss": 1.6016,
+       "step": 610
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 7.87878787878788e-06,
+       "loss": 1.5905,
+       "step": 615
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 7.77777777777778e-06,
+       "loss": 1.6387,
+       "step": 620
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 7.676767676767677e-06,
+       "loss": 1.6255,
+       "step": 625
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 7.5757575757575764e-06,
+       "loss": 1.642,
+       "step": 630
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 7.474747474747476e-06,
+       "loss": 1.6004,
+       "step": 635
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 7.373737373737374e-06,
+       "loss": 1.5635,
+       "step": 640
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 7.272727272727273e-06,
+       "loss": 1.6266,
+       "step": 645
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 7.171717171717172e-06,
+       "loss": 1.5906,
+       "step": 650
+     },
+     {
+       "epoch": 0.45,
+       "eval_loss": 2.1461074352264404,
+       "eval_runtime": 126.7907,
+       "eval_samples_per_second": 0.789,
+       "eval_steps_per_second": 0.103,
+       "step": 650
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 7.070707070707071e-06,
+       "loss": 1.5335,
+       "step": 655
+     },
+     {
+       "epoch": 0.46,
+       "learning_rate": 6.969696969696971e-06,
+       "loss": 1.5319,
+       "step": 660
+     },
+     {
+       "epoch": 0.46,
+       "learning_rate": 6.868686868686869e-06,
+       "loss": 1.5643,
+       "step": 665
+     },
+     {
+       "epoch": 0.46,
+       "learning_rate": 6.767676767676769e-06,
+       "loss": 1.635,
+       "step": 670
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 6.666666666666667e-06,
+       "loss": 1.5881,
+       "step": 675
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 6.565656565656566e-06,
+       "loss": 1.5855,
+       "step": 680
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 6.464646464646466e-06,
+       "loss": 1.5281,
+       "step": 685
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 6.363636363636364e-06,
+       "loss": 1.5686,
+       "step": 690
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 6.262626262626264e-06,
+       "loss": 1.5815,
+       "step": 695
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 6.1616161616161615e-06,
+       "loss": 1.6277,
+       "step": 700
+     },
+     {
+       "epoch": 0.48,
+       "eval_loss": 2.1339261531829834,
+       "eval_runtime": 127.7156,
+       "eval_samples_per_second": 0.783,
+       "eval_steps_per_second": 0.102,
+       "step": 700
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 6.060606060606061e-06,
+       "loss": 1.6068,
+       "step": 705
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 5.95959595959596e-06,
+       "loss": 1.6653,
+       "step": 710
+     },
+     {
+       "epoch": 0.49,
+       "learning_rate": 5.858585858585859e-06,
+       "loss": 1.5389,
+       "step": 715
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 5.7575757575757586e-06,
+       "loss": 1.5535,
+       "step": 720
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 5.656565656565657e-06,
+       "loss": 1.621,
+       "step": 725
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 5.555555555555557e-06,
+       "loss": 1.56,
+       "step": 730
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 5.4545454545454545e-06,
+       "loss": 1.5491,
+       "step": 735
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 5.353535353535354e-06,
+       "loss": 1.5151,
+       "step": 740
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 5.252525252525253e-06,
+       "loss": 1.5135,
+       "step": 745
+     },
+     {
+       "epoch": 0.52,
+       "learning_rate": 5.151515151515152e-06,
+       "loss": 1.4665,
+       "step": 750
+     },
+     {
+       "epoch": 0.52,
+       "eval_loss": 2.127960681915283,
+       "eval_runtime": 126.9672,
+       "eval_samples_per_second": 0.788,
+       "eval_steps_per_second": 0.102,
+       "step": 750
+     },
+     {
+       "epoch": 0.52,
+       "learning_rate": 5.0505050505050515e-06,
+       "loss": 1.5305,
+       "step": 755
+     },
+     {
+       "epoch": 0.52,
+       "learning_rate": 4.94949494949495e-06,
+       "loss": 1.531,
+       "step": 760
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 4.848484848484849e-06,
+       "loss": 1.6151,
+       "step": 765
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 4.747474747474748e-06,
+       "loss": 1.5451,
+       "step": 770
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 4.646464646464647e-06,
+       "loss": 1.5876,
+       "step": 775
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 4.5454545454545455e-06,
+       "loss": 1.4907,
+       "step": 780
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 4.444444444444444e-06,
+       "loss": 1.5307,
+       "step": 785
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 4.343434343434344e-06,
+       "loss": 1.4979,
+       "step": 790
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 4.242424242424243e-06,
+       "loss": 1.5098,
+       "step": 795
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 4.141414141414142e-06,
+       "loss": 1.6528,
+       "step": 800
+     },
+     {
+       "epoch": 0.55,
+       "eval_loss": 2.1138248443603516,
+       "eval_runtime": 127.0916,
+       "eval_samples_per_second": 0.787,
+       "eval_steps_per_second": 0.102,
+       "step": 800
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 4.04040404040404e-06,
+       "loss": 1.5037,
+       "step": 805
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 3.93939393939394e-06,
+       "loss": 1.5584,
+       "step": 810
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 3.8383838383838385e-06,
+       "loss": 1.5794,
+       "step": 815
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 3.737373737373738e-06,
+       "loss": 1.4932,
+       "step": 820
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 3.6363636363636366e-06,
+       "loss": 1.5709,
+       "step": 825
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 3.5353535353535356e-06,
+       "loss": 1.5937,
+       "step": 830
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 3.4343434343434347e-06,
+       "loss": 1.5733,
+       "step": 835
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 3.3333333333333333e-06,
+       "loss": 1.5532,
+       "step": 840
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 3.232323232323233e-06,
+       "loss": 1.531,
+       "step": 845
+     },
+     {
+       "epoch": 0.59,
+       "learning_rate": 3.131313131313132e-06,
+       "loss": 1.4618,
+       "step": 850
+     },
+     {
+       "epoch": 0.59,
+       "eval_loss": 2.104820489883423,
+       "eval_runtime": 126.9789,
+       "eval_samples_per_second": 0.788,
+       "eval_steps_per_second": 0.102,
+       "step": 850
+     },
+     {
+       "epoch": 0.59,
+       "learning_rate": 3.0303030303030305e-06,
+       "loss": 1.5802,
+       "step": 855
+     },
+     {
+       "epoch": 0.59,
+       "learning_rate": 2.9292929292929295e-06,
+       "loss": 1.4664,
+       "step": 860
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 2.8282828282828286e-06,
+       "loss": 1.6319,
+       "step": 865
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 2.7272727272727272e-06,
+       "loss": 1.5629,
+       "step": 870
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 2.6262626262626267e-06,
+       "loss": 1.5942,
+       "step": 875
+     },
+     {
+       "epoch": 0.61,
+       "learning_rate": 2.5252525252525258e-06,
+       "loss": 1.5522,
+       "step": 880
+     },
+     {
+       "epoch": 0.61,
+       "learning_rate": 2.4242424242424244e-06,
+       "loss": 1.5326,
+       "step": 885
+     },
+     {
+       "epoch": 0.61,
+       "learning_rate": 2.3232323232323234e-06,
+       "loss": 1.5492,
+       "step": 890
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 2.222222222222222e-06,
+       "loss": 1.483,
+       "step": 895
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 2.1212121212121216e-06,
+       "loss": 1.4871,
+       "step": 900
+     },
+     {
+       "epoch": 0.62,
+       "eval_loss": 2.096195936203003,
+       "eval_runtime": 127.2807,
+       "eval_samples_per_second": 0.786,
+       "eval_steps_per_second": 0.102,
+       "step": 900
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 2.02020202020202e-06,
+       "loss": 1.4819,
+       "step": 905
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.9191919191919192e-06,
+       "loss": 1.5326,
+       "step": 910
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.8181818181818183e-06,
+       "loss": 1.5267,
+       "step": 915
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.7171717171717173e-06,
+       "loss": 1.422,
+       "step": 920
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 1.6161616161616164e-06,
+       "loss": 1.5205,
+       "step": 925
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 1.5151515151515152e-06,
+       "loss": 1.5186,
+       "step": 930
+     },
+     {
+       "epoch": 0.64,
+       "learning_rate": 1.4141414141414143e-06,
+       "loss": 1.5456,
+       "step": 935
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 1.3131313131313134e-06,
+       "loss": 1.5378,
+       "step": 940
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 1.2121212121212122e-06,
+       "loss": 1.4828,
+       "step": 945
+     },
+     {
+       "epoch": 0.66,
+       "learning_rate": 1.111111111111111e-06,
+       "loss": 1.6019,
+       "step": 950
+     },
+     {
+       "epoch": 0.66,
+       "eval_loss": 2.090965509414673,
+       "eval_runtime": 126.05,
+       "eval_samples_per_second": 0.793,
+       "eval_steps_per_second": 0.103,
+       "step": 950
+     },
+     {
+       "epoch": 0.66,
+       "learning_rate": 1.01010101010101e-06,
+       "loss": 1.4365,
+       "step": 955
+     },
+     {
+       "epoch": 0.66,
+       "learning_rate": 9.090909090909091e-07,
+       "loss": 1.4373,
+       "step": 960
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 8.080808080808082e-07,
+       "loss": 1.5253,
+       "step": 965
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 7.070707070707071e-07,
+       "loss": 1.6014,
+       "step": 970
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 6.060606060606061e-07,
+       "loss": 1.549,
+       "step": 975
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 5.05050505050505e-07,
+       "loss": 1.5057,
+       "step": 980
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 4.040404040404041e-07,
+       "loss": 1.4688,
+       "step": 985
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 3.0303030303030305e-07,
+       "loss": 1.4982,
+       "step": 990
+     },
+     {
+       "epoch": 0.69,
+       "learning_rate": 2.0202020202020205e-07,
+       "loss": 1.5032,
+       "step": 995
+     },
+     {
+       "epoch": 0.69,
+       "learning_rate": 1.0101010101010103e-07,
+       "loss": 1.5359,
+       "step": 1000
+     },
+     {
+       "epoch": 0.69,
+       "eval_loss": 2.0878255367279053,
+       "eval_runtime": 126.9736,
+       "eval_samples_per_second": 0.788,
+       "eval_steps_per_second": 0.102,
+       "step": 1000
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 1000,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "total_flos": 50751528222720.0,
+   "trial_name": null,
+   "trial_params": null
+ }
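The eval records in `log_history` are the entries carrying an `eval_loss` key, and their minimum (2.0878... at step 1000) is what `best_metric` and `best_model_checkpoint` report above. A small illustrative script for pulling that curve out of the file:

import json

# Read trainer_state.json (assumed to sit in the working directory) and list the eval points.
with open("trainer_state.json") as f:
    state = json.load(f)

evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in evals:
    print(f"step {step:4d}: eval_loss {loss:.4f}")
print("best:", state["best_metric"])  # 2.0878255367279053, matching the step-1000 eval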
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:800fd3f2be062971c880ead3b73ddbd3d3d5ee56bef4666211ccf35bf9c3c4bb
+ size 6651
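training_args.bin is the Trainer's pickled arguments object, so it can be restored with torch.load for inspection. A sketch (note that unpickling executes code, so only do this with files you trust):

import torch

args = torch.load("training_args.bin")     # assumes the file is in the working directory
print(type(args).__name__)                 # expected: a TrainingArguments(-like) object
print(args.learning_rate, args.max_steps)  # should line up with trainer_state.json above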
zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
+ #!/usr/bin/env python
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # This script extracts fp32 consolidated weights from ZeRO stage 1, 2 and 3 DeepSpeed checkpoints. It gets
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
+ # application.
+ #
+ # example: python zero_to_fp32.py . pytorch_model.bin
14
+
15
+ import argparse
16
+ import torch
17
+ import glob
18
+ import math
19
+ import os
20
+ import re
21
+ from collections import OrderedDict
22
+ from dataclasses import dataclass
23
+
24
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
25
+ # DeepSpeed data structures it has to be available in the current python environment.
26
+ from deepspeed.utils import logger
27
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
28
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
29
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
30
+
31
+
32
+ @dataclass
33
+ class zero_model_state:
34
+ buffers: dict()
35
+ param_shapes: dict()
36
+ shared_params: list
37
+ ds_version: int
38
+ frozen_param_shapes: dict()
39
+ frozen_param_fragments: dict()
40
+
41
+
42
+ debug = 0
43
+
44
+ # load to cpu
45
+ device = torch.device('cpu')
46
+
47
+
48
+ def atoi(text):
49
+ return int(text) if text.isdigit() else text
50
+
51
+
52
+ def natural_keys(text):
53
+ '''
54
+ alist.sort(key=natural_keys) sorts in human order
55
+ http://nedbatchelder.com/blog/200712/human_sorting.html
56
+ (See Toothy's implementation in the comments)
57
+ '''
58
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
59
+
60
+
61
+ def get_model_state_file(checkpoint_dir, zero_stage):
62
+ if not os.path.isdir(checkpoint_dir):
63
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
64
+
65
+ # there should be only one file
66
+ if zero_stage <= 2:
67
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
68
+ elif zero_stage == 3:
69
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
70
+
71
+ if not os.path.exists(file):
72
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
73
+
74
+ return file
75
+
76
+
77
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
78
+ # XXX: need to test that this simple glob rule works for multi-node setup too
79
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
80
+
81
+ if len(ckpt_files) == 0:
82
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
83
+
84
+ return ckpt_files
85
+
86
+
87
+ def get_optim_files(checkpoint_dir):
88
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
89
+
90
+
91
+ def get_model_state_files(checkpoint_dir):
92
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
93
+
94
+
95
+ def parse_model_states(files):
96
+ zero_model_states = []
97
+ for file in files:
98
+ state_dict = torch.load(file, map_location=device)
99
+
100
+ if BUFFER_NAMES not in state_dict:
101
+ raise ValueError(f"{file} is not a model state checkpoint")
102
+ buffer_names = state_dict[BUFFER_NAMES]
103
+ if debug:
104
+ print("Found buffers:", buffer_names)
105
+
106
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
107
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
108
+ param_shapes = state_dict[PARAM_SHAPES]
109
+
110
+ # collect parameters that are included in param_shapes
111
+ param_names = []
112
+ for s in param_shapes:
113
+ for name in s.keys():
114
+ param_names.append(name)
115
+
116
+ # update with frozen parameters
117
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
118
+ if frozen_param_shapes is not None:
119
+ if debug:
120
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
121
+ param_names += list(frozen_param_shapes.keys())
122
+
123
+ # handle shared params
124
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
125
+
126
+ ds_version = state_dict.get(DS_VERSION, None)
127
+
128
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
129
+
130
+ z_model_state = zero_model_state(buffers=buffers,
131
+ param_shapes=param_shapes,
132
+ shared_params=shared_params,
133
+ ds_version=ds_version,
134
+ frozen_param_shapes=frozen_param_shapes,
135
+ frozen_param_fragments=frozen_param_fragments)
136
+ zero_model_states.append(z_model_state)
137
+
138
+ return zero_model_states
139
+
140
+
141
+ def parse_optim_states(files, ds_checkpoint_dir):
142
+
143
+ total_files = len(files)
144
+ state_dicts = []
145
+ for f in files:
146
+ state_dicts.append(torch.load(f, map_location=device))
147
+
148
+ if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]:
149
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
150
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
151
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
152
+
153
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
154
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
155
+ # use the max of the partition_count to get the dp world_size.
156
+
157
+ if type(world_size) is list:
158
+ world_size = max(world_size)
159
+
160
+ if world_size != total_files:
161
+ raise ValueError(
162
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
163
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
164
+ )
165
+
166
+ # the groups are named differently in each stage
167
+ if zero_stage <= 2:
168
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
169
+ elif zero_stage == 3:
170
+ fp32_groups_key = FP32_FLAT_GROUPS
171
+ else:
172
+ raise ValueError(f"unknown zero stage {zero_stage}")
173
+
174
+ if zero_stage <= 2:
175
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
176
+ elif zero_stage == 3:
177
+ # if there is more than one param group, there will be multiple flattened tensors - one
178
+ # flattened tensor per group - for simplicity merge them into a single tensor
179
+ #
180
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
181
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
182
+
183
+ fp32_flat_groups = [
184
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
185
+ ]
186
+
187
+ return zero_stage, world_size, fp32_flat_groups
188
+
189
+
190
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
191
+ """
192
+ Returns fp32 state_dict reconstructed from ds checkpoint
193
+
194
+ Args:
195
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
196
+
197
+ """
198
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
199
+
200
+ optim_files = get_optim_files(ds_checkpoint_dir)
201
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
202
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
203
+
204
+ model_files = get_model_state_files(ds_checkpoint_dir)
205
+
206
+ zero_model_states = parse_model_states(model_files)
207
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
208
+
209
+ if zero_stage <= 2:
210
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
211
+ elif zero_stage == 3:
212
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
248
+ param_shapes = zero_model_states[0].param_shapes
249
+
250
+ # Reconstruction protocol:
251
+ #
252
+ # XXX: document this
253
+
254
+ if debug:
255
+ for i in range(world_size):
256
+ for j in range(len(fp32_flat_groups[0])):
257
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
258
+
259
+ # XXX: memory usage doubles here (zero2)
260
+ num_param_groups = len(fp32_flat_groups[0])
261
+ merged_single_partition_of_fp32_groups = []
262
+ for i in range(num_param_groups):
263
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
264
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
265
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
266
+ avail_numel = sum(
267
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
268
+
269
+ if debug:
270
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
271
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
272
+ # not asserting if there is a mismatch due to possible padding
273
+ print(f"Have {avail_numel} numels to process.")
274
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
275
+
276
+ # params
277
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
278
+ # out-of-core computing solution
279
+ total_numel = 0
280
+ total_params = 0
281
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
282
+ offset = 0
283
+ avail_numel = full_single_fp32_vector.numel()
284
+ for name, shape in shapes.items():
285
+
286
+ unpartitioned_numel = shape.numel()
287
+ total_numel += unpartitioned_numel
288
+ total_params += 1
289
+
290
+ if debug:
291
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
292
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
293
+ offset += unpartitioned_numel
294
+
295
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
296
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
297
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
298
+ # live optimizer object, so we are checking that the numbers are within the right range
299
+ align_to = 2 * world_size
300
+
301
+ def zero2_align(x):
302
+ return align_to * math.ceil(x / align_to)
303
+
304
+ if debug:
305
+ print(f"original offset={offset}, avail_numel={avail_numel}")
306
+
307
+ offset = zero2_align(offset)
308
+ avail_numel = zero2_align(avail_numel)
309
+
310
+ if debug:
311
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
312
+
313
+ # Sanity check
314
+ if offset != avail_numel:
315
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
316
+
317
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
318
+
319
+
320
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
321
+ state_dict = OrderedDict()
322
+
323
+ # buffers
324
+ buffers = zero_model_states[0].buffers
325
+ state_dict.update(buffers)
326
+ if debug:
327
+ print(f"added {len(buffers)} buffers")
328
+
329
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
330
+
331
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
332
+
333
+ # recover shared parameters
334
+ for pair in zero_model_states[0].shared_params:
335
+ if pair[1] in state_dict:
336
+ state_dict[pair[0]] = state_dict[pair[1]]
337
+
338
+ return state_dict
339
+
340
+
341
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
342
+ remainder = unpartitioned_numel % world_size
343
+ padding_numel = (world_size - remainder) if remainder else 0
344
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
345
+ return partitioned_numel, padding_numel
346
+
347
+
348
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
349
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
350
+ return
351
+
352
+ if debug:
353
+ for i in range(world_size):
354
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
355
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
356
+
357
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
358
+ wanted_params = len(frozen_param_shapes)
359
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
360
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
361
+ print(f'Frozen params: Have {avail_numel} numels to process.')
362
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
363
+
364
+ total_params = 0
365
+ total_numel = 0
366
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
367
+ total_params += 1
368
+ unpartitioned_numel = shape.numel()
369
+ total_numel += unpartitioned_numel
370
+
371
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
372
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
373
+
374
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
375
+
376
+ if debug:
377
+ print(
378
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
379
+ )
380
+
381
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
382
+
383
+
384
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
385
+ param_shapes = zero_model_states[0].param_shapes
386
+ avail_numel = fp32_flat_groups[0].numel() * world_size
387
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
388
+ # param, re-consolidating each param, while dealing with padding if any
389
+
390
+ # merge list of dicts, preserving order
391
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
392
+
393
+ if debug:
394
+ for i in range(world_size):
395
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
396
+
397
+ wanted_params = len(param_shapes)
398
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
399
+ # not asserting if there is a mismatch due to possible padding
400
+ avail_numel = fp32_flat_groups[0].numel() * world_size
401
+ print(f"Trainable params: Have {avail_numel} numels to process.")
402
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
403
+
404
+ # params
405
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
406
+ # out-of-core computing solution
407
+ offset = 0
408
+ total_numel = 0
409
+ total_params = 0
410
+ for name, shape in param_shapes.items():
411
+
412
+ unpartitioned_numel = shape.numel()
413
+ total_numel += unpartitioned_numel
414
+ total_params += 1
415
+
416
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
417
+
418
+ if debug:
419
+ print(
420
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
421
+ )
422
+
423
+ # XXX: memory usage doubles here
424
+ state_dict[name] = torch.cat(
425
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
426
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
427
+ offset += partitioned_numel
428
+
429
+ offset *= world_size
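+     # `offset` counted elements consumed per rank; scaled by world_size it must
+     # equal the total numel available across all flat groups, checked below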
+
+     # Sanity check
+     if offset != avail_numel:
+         raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
+
+     print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
+
+
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
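+     # module buffers (e.g. running statistics) are not partitioned by ZeRO, so
+     # the copy held in the first model state is already complete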
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded
+     with ``load_state_dict()`` and used for training without DeepSpeed, or shared with others, for
+     example via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g. ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory;
+     in that case you may need to use the offline approach via the ``zero_to_fp32.py`` script that
+     is saved with the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
+         model = model.cpu() # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
506
+
507
+
508
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
509
+ """
510
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
511
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
512
+
513
+ Args:
514
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
515
+ - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
516
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
517
+ """
518
+
519
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
520
+ print(f"Saving fp32 state dict to {output_file}")
521
+ torch.save(state_dict, output_file)
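+     # the saved file no longer depends on DeepSpeed and can be loaded plainly,
+     # e.g. (illustration): state_dict = torch.load(output_file, map_location="cpu")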
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model on the cpu
+     2. Convert a ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, will attempt to read the tag from the file named ``latest`` in the checkpoint folder, e.g. ``global_step14``
+
+     Returns:
+         - ``model``: the modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion offline. You will find
+     it conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)