Mar2Ding committed on
Commit 5f1e7c6
1 Parent(s): 4ddc05d

Upload 11 files

added_tokens.json ADDED
@@ -0,0 +1,652 @@
+ {
+   "<0>": 92544,
+   "<100>": 92545,
+   "<101>": 92546,
+   "<102>": 92547,
+   "<103>": 92548,
+   "<104>": 92549,
+   "<105>": 92550,
+   "<106>": 92551,
+   "<107>": 92552,
+   "<108>": 92553,
+   "<109>": 92554,
+   "<10>": 92555,
+   "<110>": 92556,
+   "<111>": 92557,
+   "<112>": 92558,
+   "<113>": 92559,
+   "<114>": 92560,
+   "<115>": 92561,
+   "<116>": 92562,
+   "<117>": 92563,
+   "<118>": 92564,
+   "<119>": 92565,
+   "<11>": 92566,
+   "<120>": 92567,
+   "<121>": 92568,
+   "<122>": 92569,
+   "<123>": 92570,
+   "<124>": 92571,
+   "<125>": 92572,
+   "<126>": 92573,
+   "<127>": 92574,
+   "<128>": 92575,
+   "<129>": 92576,
+   "<12>": 92577,
+   "<130>": 92578,
+   "<131>": 92579,
+   "<132>": 92580,
+   "<133>": 92581,
+   "<134>": 92582,
+   "<135>": 92583,
+   "<136>": 92584,
+   "<137>": 92585,
+   "<138>": 92586,
+   "<139>": 92587,
+   "<13>": 92588,
+   "<140>": 92589,
+   "<141>": 92590,
+   "<142>": 92591,
+   "<143>": 92592,
+   "<144>": 92593,
+   "<145>": 92594,
+   "<146>": 92595,
+   "<147>": 92596,
+   "<148>": 92597,
+   "<149>": 92598,
+   "<14>": 92599,
+   "<150>": 92600,
+   "<151>": 92601,
+   "<152>": 92602,
+   "<153>": 92603,
+   "<154>": 92604,
+   "<155>": 92605,
+   "<156>": 92606,
+   "<157>": 92607,
+   "<158>": 92608,
+   "<159>": 92609,
+   "<15>": 92610,
+   "<160>": 92611,
+   "<161>": 92612,
+   "<162>": 92613,
+   "<163>": 92614,
+   "<164>": 92615,
+   "<165>": 92616,
+   "<166>": 92617,
+   "<167>": 92618,
+   "<168>": 92619,
+   "<169>": 92620,
+   "<16>": 92621,
+   "<170>": 92622,
+   "<171>": 92623,
+   "<172>": 92624,
+   "<173>": 92625,
+   "<174>": 92626,
+   "<175>": 92627,
+   "<176>": 92628,
+   "<177>": 92629,
+   "<178>": 92630,
+   "<179>": 92631,
+   "<17>": 92632,
+   "<180>": 92633,
+   "<181>": 92634,
+   "<182>": 92635,
+   "<183>": 92636,
+   "<184>": 92637,
+   "<185>": 92638,
+   "<186>": 92639,
+   "<187>": 92640,
+   "<188>": 92641,
+   "<189>": 92642,
+   "<18>": 92643,
+   "<190>": 92644,
+   "<191>": 92645,
+   "<192>": 92646,
+   "<193>": 92647,
+   "<194>": 92648,
+   "<195>": 92649,
+   "<196>": 92650,
+   "<197>": 92651,
+   "<198>": 92652,
+   "<199>": 92653,
+   "<19>": 92654,
+   "<1>": 92655,
+   "<200>": 92656,
+   "<201>": 92657,
+   "<202>": 92658,
+   "<203>": 92659,
+   "<204>": 92660,
+   "<205>": 92661,
+   "<206>": 92662,
+   "<207>": 92663,
+   "<208>": 92664,
+   "<209>": 92665,
+   "<20>": 92666,
+   "<210>": 92667,
+   "<211>": 92668,
+   "<212>": 92669,
+   "<213>": 92670,
+   "<214>": 92671,
+   "<215>": 92672,
+   "<216>": 92673,
+   "<217>": 92674,
+   "<218>": 92675,
+   "<219>": 92676,
+   "<21>": 92677,
+   "<220>": 92678,
+   "<221>": 92679,
+   "<222>": 92680,
+   "<223>": 92681,
+   "<224>": 92682,
+   "<225>": 92683,
+   "<226>": 92684,
+   "<227>": 92685,
+   "<228>": 92686,
+   "<229>": 92687,
+   "<22>": 92688,
+   "<230>": 92689,
+   "<231>": 92690,
+   "<232>": 92691,
+   "<233>": 92692,
+   "<234>": 92693,
+   "<235>": 92694,
+   "<236>": 92695,
+   "<237>": 92696,
+   "<238>": 92697,
+   "<239>": 92698,
+   "<23>": 92699,
+   "<240>": 92700,
+   "<241>": 92701,
+   "<242>": 92702,
+   "<243>": 92703,
+   "<244>": 92704,
+   "<245>": 92705,
+   "<246>": 92706,
+   "<247>": 92707,
+   "<248>": 92708,
+   "<249>": 92709,
+   "<24>": 92710,
+   "<250>": 92711,
+   "<251>": 92712,
+   "<252>": 92713,
+   "<253>": 92714,
+   "<254>": 92715,
+   "<255>": 92716,
+   "<256>": 92717,
+   "<257>": 92718,
+   "<258>": 92719,
+   "<259>": 92720,
+   "<25>": 92721,
+   "<260>": 92722,
+   "<261>": 92723,
+   "<262>": 92724,
+   "<263>": 92725,
+   "<264>": 92726,
+   "<265>": 92727,
+   "<266>": 92728,
+   "<267>": 92729,
+   "<268>": 92730,
+   "<269>": 92731,
+   "<26>": 92732,
+   "<270>": 92733,
+   "<271>": 92734,
+   "<272>": 92735,
+   "<273>": 92736,
+   "<274>": 92737,
+   "<275>": 92738,
+   "<276>": 92739,
+   "<277>": 92740,
+   "<278>": 92741,
+   "<279>": 92742,
+   "<27>": 92743,
+   "<280>": 92744,
+   "<281>": 92745,
+   "<282>": 92746,
+   "<283>": 92747,
+   "<284>": 92748,
+   "<285>": 92749,
+   "<286>": 92750,
+   "<287>": 92751,
+   "<288>": 92752,
+   "<289>": 92753,
+   "<28>": 92754,
+   "<290>": 92755,
+   "<291>": 92756,
+   "<292>": 92757,
+   "<293>": 92758,
+   "<294>": 92759,
+   "<295>": 92760,
+   "<296>": 92761,
+   "<297>": 92762,
+   "<298>": 92763,
+   "<299>": 92764,
+   "<29>": 92765,
+   "<2>": 92766,
+   "<300>": 92767,
+   "<301>": 92768,
+   "<302>": 92769,
+   "<303>": 92770,
+   "<304>": 92771,
+   "<305>": 92772,
+   "<306>": 92773,
+   "<307>": 92774,
+   "<308>": 92775,
+   "<309>": 92776,
+   "<30>": 92777,
+   "<310>": 92778,
+   "<311>": 92779,
+   "<312>": 92780,
+   "<313>": 92781,
+   "<314>": 92782,
+   "<315>": 92783,
+   "<316>": 92784,
+   "<317>": 92785,
+   "<318>": 92786,
+   "<319>": 92787,
+   "<31>": 92788,
+   "<320>": 92789,
+   "<321>": 92790,
+   "<322>": 92791,
+   "<323>": 92792,
+   "<324>": 92793,
+   "<325>": 92794,
+   "<326>": 92795,
+   "<327>": 92796,
+   "<328>": 92797,
+   "<329>": 92798,
+   "<32>": 92799,
+   "<330>": 92800,
+   "<331>": 92801,
+   "<332>": 92802,
+   "<333>": 92803,
+   "<334>": 92804,
+   "<335>": 92805,
+   "<336>": 92806,
+   "<337>": 92807,
+   "<338>": 92808,
+   "<339>": 92809,
+   "<33>": 92810,
+   "<340>": 92811,
+   "<341>": 92812,
+   "<342>": 92813,
+   "<343>": 92814,
+   "<344>": 92815,
+   "<345>": 92816,
+   "<346>": 92817,
+   "<347>": 92818,
+   "<348>": 92819,
+   "<349>": 92820,
+   "<34>": 92821,
+   "<350>": 92822,
+   "<351>": 92823,
+   "<352>": 92824,
+   "<353>": 92825,
+   "<354>": 92826,
+   "<355>": 92827,
+   "<356>": 92828,
+   "<357>": 92829,
+   "<358>": 92830,
+   "<359>": 92831,
+   "<35>": 92832,
+   "<360>": 92833,
+   "<361>": 92834,
+   "<362>": 92835,
+   "<363>": 92836,
+   "<364>": 92837,
+   "<365>": 92838,
+   "<366>": 92839,
+   "<367>": 92840,
+   "<368>": 92841,
+   "<369>": 92842,
+   "<36>": 92843,
+   "<370>": 92844,
+   "<371>": 92845,
+   "<372>": 92846,
+   "<373>": 92847,
+   "<374>": 92848,
+   "<375>": 92849,
+   "<376>": 92850,
+   "<377>": 92851,
+   "<378>": 92852,
+   "<379>": 92853,
+   "<37>": 92854,
+   "<380>": 92855,
+   "<381>": 92856,
+   "<382>": 92857,
+   "<383>": 92858,
+   "<384>": 92859,
+   "<385>": 92860,
+   "<386>": 92861,
+   "<387>": 92862,
+   "<388>": 92863,
+   "<389>": 92864,
+   "<38>": 92865,
+   "<390>": 92866,
+   "<391>": 92867,
+   "<392>": 92868,
+   "<393>": 92869,
+   "<394>": 92870,
+   "<395>": 92871,
+   "<396>": 92872,
+   "<397>": 92873,
+   "<398>": 92874,
+   "<399>": 92875,
+   "<39>": 92876,
+   "<3>": 92877,
+   "<400>": 92878,
+   "<401>": 92879,
+   "<402>": 92880,
+   "<403>": 92881,
+   "<404>": 92882,
+   "<405>": 92883,
+   "<406>": 92884,
+   "<407>": 92885,
+   "<408>": 92886,
+   "<409>": 92887,
+   "<40>": 92888,
+   "<410>": 92889,
+   "<411>": 92890,
+   "<412>": 92891,
+   "<413>": 92892,
+   "<414>": 92893,
+   "<415>": 92894,
+   "<416>": 92895,
+   "<417>": 92896,
+   "<418>": 92897,
+   "<419>": 92898,
+   "<41>": 92899,
+   "<420>": 92900,
+   "<421>": 92901,
+   "<422>": 92902,
+   "<423>": 92903,
+   "<424>": 92904,
+   "<425>": 92905,
+   "<426>": 92906,
+   "<427>": 92907,
+   "<428>": 92908,
+   "<429>": 92909,
+   "<42>": 92910,
+   "<430>": 92911,
+   "<431>": 92912,
+   "<432>": 92913,
+   "<433>": 92914,
+   "<434>": 92915,
+   "<435>": 92916,
+   "<436>": 92917,
+   "<437>": 92918,
+   "<438>": 92919,
+   "<439>": 92920,
+   "<43>": 92921,
+   "<440>": 92922,
+   "<441>": 92923,
+   "<442>": 92924,
+   "<443>": 92925,
+   "<444>": 92926,
+   "<445>": 92927,
+   "<446>": 92928,
+   "<447>": 92929,
+   "<448>": 92930,
+   "<449>": 92931,
+   "<44>": 92932,
+   "<450>": 92933,
+   "<451>": 92934,
+   "<452>": 92935,
+   "<453>": 92936,
+   "<454>": 92937,
+   "<455>": 92938,
+   "<456>": 92939,
+   "<457>": 92940,
+   "<458>": 92941,
+   "<459>": 92942,
+   "<45>": 92943,
+   "<460>": 92944,
+   "<461>": 92945,
+   "<462>": 92946,
+   "<463>": 92947,
+   "<464>": 92948,
+   "<465>": 92949,
+   "<466>": 92950,
+   "<467>": 92951,
+   "<468>": 92952,
+   "<469>": 92953,
+   "<46>": 92954,
+   "<470>": 92955,
+   "<471>": 92956,
+   "<472>": 92957,
+   "<473>": 92958,
+   "<474>": 92959,
+   "<475>": 92960,
+   "<476>": 92961,
+   "<477>": 92962,
+   "<478>": 92963,
+   "<479>": 92964,
+   "<47>": 92965,
+   "<480>": 92966,
+   "<481>": 92967,
+   "<482>": 92968,
+   "<483>": 92969,
+   "<484>": 92970,
+   "<485>": 92971,
+   "<486>": 92972,
+   "<487>": 92973,
+   "<488>": 92974,
+   "<489>": 92975,
+   "<48>": 92976,
+   "<490>": 92977,
+   "<491>": 92978,
+   "<492>": 92979,
+   "<493>": 92980,
+   "<494>": 92981,
+   "<495>": 92982,
+   "<496>": 92983,
+   "<497>": 92984,
+   "<498>": 92985,
+   "<499>": 92986,
+   "<49>": 92987,
+   "<4>": 92988,
+   "<500>": 92989,
+   "<501>": 92990,
+   "<502>": 92991,
+   "<503>": 92992,
+   "<504>": 92993,
+   "<505>": 92994,
+   "<506>": 92995,
+   "<507>": 92996,
+   "<508>": 92997,
+   "<509>": 92998,
+   "<50>": 92999,
+   "<510>": 93000,
+   "<511>": 93001,
+   "<51>": 93002,
+   "<52>": 93003,
+   "<53>": 93004,
+   "<54>": 93005,
+   "<55>": 93006,
+   "<56>": 93007,
+   "<57>": 93008,
+   "<58>": 93009,
+   "<59>": 93010,
+   "<5>": 93011,
+   "<60>": 93012,
+   "<61>": 93013,
+   "<62>": 93014,
+   "<63>": 93015,
+   "<64>": 93016,
+   "<65>": 93017,
+   "<66>": 93018,
+   "<67>": 93019,
+   "<68>": 93020,
+   "<69>": 93021,
+   "<6>": 93022,
+   "<70>": 93023,
+   "<71>": 93024,
+   "<72>": 93025,
+   "<73>": 93026,
+   "<74>": 93027,
+   "<75>": 93028,
+   "<76>": 93029,
+   "<77>": 93030,
+   "<78>": 93031,
+   "<79>": 93032,
+   "<7>": 93033,
+   "<80>": 93034,
+   "<81>": 93035,
+   "<82>": 93036,
+   "<83>": 93037,
+   "<84>": 93038,
+   "<85>": 93039,
+   "<86>": 93040,
+   "<87>": 93041,
+   "<88>": 93042,
+   "<89>": 93043,
+   "<8>": 93044,
+   "<90>": 93045,
+   "<91>": 93046,
+   "<92>": 93047,
+   "<93>": 93048,
+   "<94>": 93049,
+   "<95>": 93050,
+   "<96>": 93051,
+   "<97>": 93052,
+   "<98>": 93053,
+   "<99>": 93054,
+   "<9>": 93055,
+   "<A#-1>": 93056,
+   "<A#0>": 93057,
+   "<A#1>": 93058,
+   "<A#2>": 93059,
+   "<A#3>": 93060,
+   "<A#4>": 93061,
+   "<A#5>": 93062,
+   "<A#6>": 93063,
+   "<A#7>": 93064,
+   "<A#8>": 93065,
+   "<A#9>": 93066,
+   "<A-1>": 93067,
+   "<A0>": 93068,
+   "<A1>": 93069,
+   "<A2>": 93070,
+   "<A3>": 93071,
+   "<A4>": 93072,
+   "<A5>": 93073,
+   "<A6>": 93074,
+   "<A7>": 93075,
+   "<A8>": 93076,
+   "<A9>": 93077,
+   "<B-1>": 93078,
+   "<B0>": 93079,
+   "<B1>": 93080,
+   "<B2>": 93081,
+   "<B3>": 93082,
+   "<B4>": 93083,
+   "<B5>": 93084,
+   "<B6>": 93085,
+   "<B7>": 93086,
+   "<B8>": 93087,
+   "<B9>": 93088,
+   "<C#-1>": 93089,
+   "<C#0>": 93090,
+   "<C#1>": 93091,
+   "<C#2>": 93092,
+   "<C#3>": 93093,
+   "<C#4>": 93094,
+   "<C#5>": 93095,
+   "<C#6>": 93096,
+   "<C#7>": 93097,
+   "<C#8>": 93098,
+   "<C#9>": 93099,
+   "<C-1>": 93100,
+   "<C0>": 93101,
+   "<C1>": 93102,
+   "<C2>": 93103,
+   "<C3>": 93104,
+   "<C4>": 93105,
+   "<C5>": 93106,
+   "<C6>": 93107,
+   "<C7>": 93108,
+   "<C8>": 93109,
+   "<C9>": 93110,
+   "<D#-1>": 93111,
+   "<D#0>": 93112,
+   "<D#1>": 93113,
+   "<D#2>": 93114,
+   "<D#3>": 93115,
+   "<D#4>": 93116,
+   "<D#5>": 93117,
+   "<D#6>": 93118,
+   "<D#7>": 93119,
+   "<D#8>": 93120,
+   "<D#9>": 93121,
+   "<D-1>": 93122,
+   "<D0>": 93123,
+   "<D1>": 93124,
+   "<D2>": 93125,
+   "<D3>": 93126,
+   "<D4>": 93127,
+   "<D5>": 93128,
+   "<D6>": 93129,
+   "<D7>": 93130,
+   "<D8>": 93131,
+   "<D9>": 93132,
+   "<E-1>": 93133,
+   "<E0>": 93134,
+   "<E1>": 93135,
+   "<E2>": 93136,
+   "<E3>": 93137,
+   "<E4>": 93138,
+   "<E5>": 93139,
+   "<E6>": 93140,
+   "<E7>": 93141,
+   "<E8>": 93142,
+   "<E9>": 93143,
+   "<F#-1>": 93144,
+   "<F#0>": 93145,
+   "<F#1>": 93146,
+   "<F#2>": 93147,
+   "<F#3>": 93148,
+   "<F#4>": 93149,
+   "<F#5>": 93150,
+   "<F#6>": 93151,
+   "<F#7>": 93152,
+   "<F#8>": 93153,
+   "<F#9>": 93154,
+   "<F-1>": 93155,
+   "<F0>": 93156,
+   "<F1>": 93157,
+   "<F2>": 93158,
+   "<F3>": 93159,
+   "<F4>": 93160,
+   "<F5>": 93161,
+   "<F6>": 93162,
+   "<F7>": 93163,
+   "<F8>": 93164,
+   "<F9>": 93165,
+   "<G#-1>": 93166,
+   "<G#0>": 93167,
+   "<G#1>": 93168,
+   "<G#2>": 93169,
+   "<G#3>": 93170,
+   "<G#4>": 93171,
+   "<G#5>": 93172,
+   "<G#6>": 93173,
+   "<G#7>": 93174,
+   "<G#8>": 93175,
+   "<G#9>": 93176,
+   "<G-1>": 93177,
+   "<G0>": 93178,
+   "<G1>": 93179,
+   "<G2>": 93180,
+   "<G3>": 93181,
+   "<G4>": 93182,
+   "<G5>": 93183,
+   "<G6>": 93184,
+   "<G7>": 93185,
+   "<G8>": 93186,
+   "<G9>": 93187,
+   "<bol>": 93188,
+   "<bom>": 93189,
+   "<bop>": 93190,
+   "<eol>": 93191,
+   "<eom>": 93192,
+   "<eop>": 93193
+ }
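Taken together, the 650 entries above extend the 92,544-token base vocabulary to the 93,194 entries declared in config.json below: 512 numeric tokens (<0> through <511>), 132 pitch tokens covering twelve pitch classes across octaves -1 to 9 (<C-1> up to <G9>, plus the sharps), and six span markers (<bol>/<eol>, <bom>/<eom>, <bop>/<eop>). A minimal sketch of how these are picked up at load time; the checkpoint path is a placeholder for a local download of this repo:

from transformers import AutoTokenizer

# "path/to/this/checkpoint" is a placeholder, not a published repo id.
tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint", trust_remote_code=True)

# added_tokens.json is read automatically; each entry maps to a fixed id:
assert tokenizer.convert_tokens_to_ids("<0>") == 92544
assert tokenizer.convert_tokens_to_ids("<C4>") == 93105
assert tokenizer.convert_tokens_to_ids("<eop>") == 93193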
build_mlp.py ADDED
@@ -0,0 +1,205 @@
+ import torch
+ import torch.nn as nn
+ import re
+ import math
+ from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
+
+
+ def build_vision_tower():
+     vision_tower = '/mnt/petrelfs/share_data/dongxiaoyi/share_models/clip_l_336'
+     return CLIPVisionTower(vision_tower)
+
+
+ def build_vision_projector():
+     projector_type = 'mlp2x_gelu'
+     mm_hidden_size = 1024
+     hidden_size = 4096
+
+     mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
+     if mlp_gelu_match:
+         mlp_depth = int(mlp_gelu_match.group(1))
+         modules = [nn.Linear(mm_hidden_size, hidden_size)]
+         for _ in range(1, mlp_depth):
+             modules.append(nn.GELU())
+             modules.append(nn.Linear(hidden_size, hidden_size))
+         return nn.Sequential(*modules)
+
+     if projector_type == 'identity':
+         return IdentityMap()
+
+     raise ValueError(f'Unknown projector type: {projector_type}')
+
+
+ class IdentityMap(nn.Module):
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x, *args, **kwargs):
+         return x
+
+     @property
+     def config(self):
+         return {"mm_projector_type": 'identity'}
+
+
+ class CLIPVisionTower(nn.Module):
+     def __init__(self, vision_tower):
+         super().__init__()
+
+         self.is_loaded = False
+         self.is_resize_pos = False
+
+         self.vision_tower_name = vision_tower
+         self.select_layer = -1
+         self.select_feature = 'patch'
+         self.load_model()
+         # self.resize_pos()
+
+     def load_model(self):
+         self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name)
+         self.vision_tower.requires_grad_(False)
+         self.is_loaded = True
+
+     def resize_pos(self):
+         pos_embed_checkpoint = self.vision_tower.vision_model.embeddings.position_embedding.weight
+         pos_embed_checkpoint = pos_embed_checkpoint.unsqueeze(0)
+         orig_size = 24
+         new_size = 16
+
+         if pos_embed_checkpoint.shape[1] == new_size ** 2 + 1:
+             self.is_resize_pos = True
+         else:
+             embedding_size = pos_embed_checkpoint.shape[-1]
+             num_extra_tokens = 1
+             new_num = new_size ** 2 + num_extra_tokens
+             print('Position interpolate from %dx%d to %dx%d' %
+                   (orig_size, orig_size, new_size, new_size))
+             extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+             # only the position tokens are interpolated
+             pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+             pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size,
+                                             embedding_size).permute(0, 3, 1, 2)
+             pos_tokens = torch.nn.functional.interpolate(pos_tokens,
+                                                          size=(new_size, new_size),
+                                                          mode='bicubic',
+                                                          align_corners=False)
+             pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+             new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+
+             new_pos_embed = new_pos_embed.squeeze(0)
+
+             self.vision_tower.vision_model.embeddings.position_embedding = torch.nn.Embedding(new_num, 1024)
+             self.vision_tower.vision_model.embeddings.position_embedding.weight = torch.nn.Parameter(
+                 new_pos_embed.to(pos_embed_checkpoint.device).to(pos_embed_checkpoint.dtype))
+             self.vision_tower.vision_model.embeddings.position_ids = torch.arange(new_num).expand(
+                 (1, -1)).to(pos_embed_checkpoint.device)
+             self.is_resize_pos = True
+
+     def feature_select(self, image_forward_outs):
+         image_features = image_forward_outs.hidden_states[self.select_layer]
+         if self.select_feature == 'patch':
+             image_features = image_features[:, 1:]
+         elif self.select_feature == 'cls_patch':
+             image_features = image_features
+         else:
+             raise ValueError(f'Unexpected select feature: {self.select_feature}')
+         return image_features
+
+     def forward(self, images):
+         if not self.is_loaded:
+             self.load_model()
+         if type(images) is list:
+             image_features = []
+             for image in images:
+                 image_forward_out = self.vision_tower(
+                     image.to(device=self.device, dtype=self.dtype).unsqueeze(0),
+                     output_hidden_states=True)
+                 image_feature = self.feature_select(image_forward_out).to(image.dtype)
+                 image_features.append(image_feature)
+         else:
+             image_forward_outs = self.vision_tower(
+                 images.to(device=self.device, dtype=self.dtype),
+                 output_hidden_states=True)
+             image_features = self.feature_select(image_forward_outs).to(images.dtype)
+
+         return image_features
+
+     @property
+     def dummy_feature(self):
+         return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
+
+     @property
+     def dtype(self):
+         return self.vision_tower.dtype
+
+     @property
+     def device(self):
+         return self.vision_tower.device
+
+     @property
+     def config(self):
+         if self.is_loaded:
+             return self.vision_tower.config
+         else:
+             return self.cfg_only
+
+     @property
+     def hidden_size(self):
+         return self.config.hidden_size
+
+     @property
+     def num_patches(self):
+         return (self.config.image_size // self.config.patch_size) ** 2
+
+
+ class PLoRA(nn.Linear):
+     def __init__(self,
+                  in_features: int,
+                  out_features: int,
+                  bias: bool = True,
+                  device=None,
+                  dtype=None,
+                  lora_r=8,
+                  lora_alpha=16,
+                  lora_dropout=0.05,
+                  lora_len=0,
+                  **kwargs) -> None:
+         super().__init__(in_features, out_features, bias, device, dtype)
+         self.lora_r = lora_r
+         self.lora_alpha = lora_alpha
+         self.lora_len = lora_len
+         if lora_dropout > 0.:
+             self.lora_dropout = nn.Dropout(p=lora_dropout)
+         else:
+             self.lora_dropout = lambda x: x
+         self.lora_scaling = self.lora_alpha / self.lora_r
+
+         self.Plora_A = nn.Linear(in_features,
+                                  self.lora_r,
+                                  bias=False,
+                                  device=device,
+                                  dtype=dtype)
+         self.Plora_B = nn.Linear(self.lora_r,
+                                  out_features,
+                                  bias=False,
+                                  device=device,
+                                  dtype=dtype)
+
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         super().reset_parameters()  # default nn.Linear init for the base weight/bias
+         # Note: the uploaded file guarded on `hasattr(self, 'lora_A')`, which never matches
+         # the attributes defined above (Plora_A/Plora_B), so the LoRA branch was never
+         # re-initialized here; fixed to reference the actual attribute names. (Harmless at
+         # inference time, since the checkpoint weights are loaded afterwards.)
+         if hasattr(self, 'Plora_A'):
+             # initialize A the same way as the default for nn.Linear and B to zero
+             nn.init.kaiming_uniform_(self.Plora_A.weight, a=math.sqrt(5))
+             nn.init.zeros_(self.Plora_B.weight)
+
+     def forward(self, x, im_mask=None):
+         res = super().forward(x)
+         if im_mask is not None:
+             if torch.sum(im_mask) > 0:
+                 part_x = x[im_mask]
+                 res[im_mask] += self.Plora_B(self.Plora_A(
+                     self.lora_dropout(part_x))) * self.lora_scaling
+             else:
+                 # keep the LoRA branch in the graph (multiplied by zero) so gradient
+                 # bookkeeping stays consistent when a batch contains no image tokens
+                 part_x = x[:, :1]
+                 res[:, :1] += self.Plora_B(self.Plora_A(
+                     self.lora_dropout(part_x))) * 0
+         return res
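The partial-LoRA routing in PLoRA is the core idea of this file: the low-rank branch fires only at positions flagged by im_mask, so image embeddings see adapted weights while text tokens pass through the frozen base Linear unchanged. A small self-contained sketch of that behavior; the shapes and mask here are illustrative, not taken from the repo:

import torch

layer = PLoRA(4096, 4096, bias=False, lora_r=256, lora_alpha=256, lora_len=256).eval()
x = torch.randn(2, 10, 4096)
im_mask = torch.zeros(2, 10, dtype=torch.bool)
im_mask[:, :3] = True  # pretend the first three positions hold image embeddings

out = layer(x, im_mask)
base = torch.nn.functional.linear(x, layer.weight)
# positions outside the mask are untouched by the LoRA branch
assert torch.allclose(out[~im_mask], base[~im_mask])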
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": "/mnt/petrelfs/dingshuangrui/PuQu/internlm2-chat-7b",
+   "architectures": [
+     "InternLM2ForCausalLM"
+   ],
+   "attn_implementation": "eager",
+   "auto_map": {
+     "AutoConfig": "configuration_internlm.InternLMConfig",
+     "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+     "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+   },
+   "bias": false,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_length": 2048,
+   "max_position_embeddings": 32768,
+   "model_type": "internlm",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 2,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 1.0,
+     "type": "dynamic"
+   },
+   "rope_theta": 1000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.28.0",
+   "use_cache": false,
+   "vocab_size": 93194
+ }
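The auto_map above routes loading through the custom classes shipped in this repo, so trust_remote_code=True is required. A minimal loading sketch, again with a placeholder path:

import torch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("path/to/this/checkpoint", trust_remote_code=True)
assert config.vocab_size == 93194  # 92,544 base tokens + the 650 added above

model = AutoModelForCausalLM.from_pretrained(
    "path/to/this/checkpoint",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
    trust_remote_code=True,
)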
configuration_internlm.py ADDED
@@ -0,0 +1,164 @@
+ # coding=utf-8
+ # Copyright (c) InternLM. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class InternLMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+     an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a configuration similar to that of InternLM-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 103168):
+             Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented
+             by the `inputs_ids` passed when calling [`InternLMModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. For more details check out
+             [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something
+             large just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+     Example:
+
+     ```python
+     >>> from transformers import InternLMModel, InternLMConfig
+
+     >>> # Initializing an InternLM internlm-7b style configuration
+     >>> configuration = InternLMConfig()
+
+     >>> # Initializing a model from the internlm-7b style configuration
+     >>> model = InternLMModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+     model_type = "internlm"
+     _auto_class = "AutoConfig"
+
+     def __init__(  # pylint: disable=W0102
+         self,
+         vocab_size=103168,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         bias=True,
+         rope_theta=10000,
+         rope_scaling=None,
+         attn_implementation="eager",
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         self.attn_implementation = attn_implementation
+         if self.attn_implementation is None:
+             self.attn_implementation = "eager"
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
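A quick sketch of the rope_scaling contract enforced by _rope_scaling_validation above: it must be a two-field dict {"type": "linear" | "dynamic", "factor": float >= 1.0}, exactly what config.json supplies.

cfg = InternLMConfig(rope_scaling={"type": "dynamic", "factor": 1.0})  # accepted

try:
    InternLMConfig(rope_scaling={"type": "ntk", "factor": 1.0})
except ValueError as e:
    print(e)  # `rope_scaling`'s type field must be one of ['linear', 'dynamic'], got ntk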
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 2,
+   "transformers_version": "4.28.0"
+ }
modeling_internlm2.py ADDED
@@ -0,0 +1,1270 @@
1
+ # coding=utf-8
2
+ # # Copyright (c) InternLM. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch InternLM2 model."""
21
+ import math
22
+ import queue
23
+ import threading
24
+ import warnings
25
+ import copy
26
+ from typing import List, Optional, Tuple, Union
27
+ from torchvision import transforms
28
+ from torchvision.transforms.functional import InterpolationMode
29
+ from PIL import Image
30
+
31
+ import torch
32
+ import torch.utils.checkpoint
33
+ from einops import rearrange
34
+ from torch import nn
35
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
36
+ from transformers.activations import ACT2FN
37
+ from transformers.modeling_outputs import (
38
+ BaseModelOutputWithPast,
39
+ CausalLMOutputWithPast,
40
+ SequenceClassifierOutputWithPast,
41
+ )
42
+ from transformers.modeling_utils import PreTrainedModel
43
+ from transformers.utils import (
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ logging,
47
+ replace_return_docstrings,
48
+ )
49
+ from transformers import StoppingCriteria, StoppingCriteriaList
50
+ try:
51
+ from transformers.generation.streamers import BaseStreamer
52
+ except: # noqa # pylint: disable=bare-except
53
+ BaseStreamer = None
54
+
55
+ from .configuration_internlm import InternLMConfig as InternLM2Config
56
+ from .build_mlp import build_vision_tower, build_vision_projector, PLoRA
57
+
58
+ logger = logging.get_logger(__name__)
59
+
60
+ _CONFIG_FOR_DOC = "InternLM2Config"
61
+
62
+
63
+ class StoppingCriteriaSub(StoppingCriteria):
64
+ def __init__(self, stops=[], encounters=1):
65
+ super().__init__()
66
+ self.stops = stops
67
+
68
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
69
+ for stop in self.stops:
70
+ if torch.all((stop == input_ids[0][-len(stop):])).item():
71
+ return True
72
+
73
+ return False
74
+
75
+
76
+
77
+
78
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
79
+ def _make_causal_mask(
80
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
81
+ ):
82
+ """
83
+ Make causal mask used for bi-directional self-attention.
84
+ """
85
+ bsz, tgt_len = input_ids_shape
86
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
87
+ mask_cond = torch.arange(mask.size(-1), device=device)
88
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
89
+ mask = mask.to(dtype)
90
+
91
+ if past_key_values_length > 0:
92
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
93
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
94
+
95
+
96
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
97
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
98
+ """
99
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
100
+ """
101
+ bsz, src_len = mask.size()
102
+ tgt_len = tgt_len if tgt_len is not None else src_len
103
+
104
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
105
+
106
+ inverted_mask = 1.0 - expanded_mask
107
+
108
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
109
+
110
+
111
+ class InternLM2RMSNorm(nn.Module):
112
+ def __init__(self, hidden_size, eps=1e-6):
113
+ """
114
+ InternLM2RMSNorm is equivalent to T5LayerNorm
115
+ """
116
+ super().__init__()
117
+ self.weight = nn.Parameter(torch.ones(hidden_size))
118
+ self.variance_epsilon = eps
119
+
120
+ def forward(self, hidden_states):
121
+ input_dtype = hidden_states.dtype
122
+ hidden_states = hidden_states.to(torch.float32)
123
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
124
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
125
+ return self.weight * hidden_states.to(input_dtype)
126
+
127
+
128
+ class InternLM2RotaryEmbedding(nn.Module):
129
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
130
+ super().__init__()
131
+
132
+ self.dim = dim
133
+ self.max_position_embeddings = max_position_embeddings
134
+ self.base = base
135
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
136
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
137
+
138
+ # Build here to make `torch.jit.trace` work.
139
+ self._set_cos_sin_cache(
140
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
141
+ )
142
+
143
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
144
+ self.max_seq_len_cached = seq_len
145
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
146
+
147
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
148
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
149
+ emb = torch.cat((freqs, freqs), dim=-1)
150
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
151
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
152
+
153
+ def forward(self, x, seq_len=None):
154
+ # x: [bs, num_attention_heads, seq_len, head_size]
155
+ if seq_len > self.max_seq_len_cached:
156
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
157
+
158
+ return (
159
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
160
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
161
+ )
162
+
163
+
164
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
165
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
166
+
167
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
168
+ self.scaling_factor = scaling_factor
169
+ super().__init__(dim, max_position_embeddings, base, device)
170
+
171
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
172
+ self.max_seq_len_cached = seq_len
173
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
174
+ t = t / self.scaling_factor
175
+
176
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
177
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
178
+ emb = torch.cat((freqs, freqs), dim=-1)
179
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
180
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
181
+
182
+
183
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
184
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
185
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
186
+ """
187
+
188
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
189
+ self.scaling_factor = scaling_factor
190
+ super().__init__(dim, max_position_embeddings, base, device)
191
+
192
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
193
+ self.max_seq_len_cached = seq_len
194
+
195
+ if seq_len > self.max_position_embeddings:
196
+ base = self.base * (
197
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
198
+ ) ** (self.dim / (self.dim - 2))
199
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
200
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
201
+
202
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
203
+
204
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
205
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
206
+ emb = torch.cat((freqs, freqs), dim=-1)
207
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
208
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
209
+
210
+
211
+ def rotate_half(x):
212
+ """Rotates half the hidden dims of the input."""
213
+ x1 = x[..., : x.shape[-1] // 2]
214
+ x2 = x[..., x.shape[-1] // 2 :]
215
+ return torch.cat((-x2, x1), dim=-1)
216
+
217
+
218
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
219
+ # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
220
+ cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
221
+ sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
222
+ cos = cos.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
223
+ sin = sin.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
224
+ if q.size(2) == 1:
225
+ q_embed = (q * cos[:, :, -1:, :]) + (rotate_half(q) * sin[:, :, -1:, :])
226
+ else:
227
+ q_embed = (q * cos) + (rotate_half(q) * sin)
228
+
229
+ if k.size(2) == 1:
230
+ k_embed = (k * cos[:, :, -1:, :]) + (rotate_half(k) * sin[:, :, -1:, :])
231
+ else:
232
+ k_embed = (k * cos) + (rotate_half(k) * sin)
233
+
234
+ return q_embed, k_embed
235
+
236
+
237
+ class InternLM2MLP(nn.Module):
238
+ def __init__(self, config):
239
+ super().__init__()
240
+ self.config = config
241
+ self.hidden_size = config.hidden_size
242
+ self.intermediate_size = config.intermediate_size
243
+ #self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
244
+ #self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
245
+ #self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
246
+
247
+ self.w1 = PLoRA(self.hidden_size, self.intermediate_size, bias=False,
248
+ lora_r=256, lora_alpha=256, lora_len=256)
249
+ self.w3 = PLoRA(self.hidden_size, self.intermediate_size, bias=False,
250
+ lora_r=256, lora_alpha=256, lora_len=256)
251
+ self.w2 = PLoRA(self.intermediate_size, self.hidden_size, bias=False,
252
+ lora_r=256, lora_alpha=256, lora_len=256)
253
+
254
+ self.act_fn = ACT2FN[config.hidden_act]
255
+
256
+ def forward(self, x, im_mask):
257
+ down_proj = self.w2(self.act_fn(self.w1(x, im_mask)) * self.w3(x, im_mask), im_mask)
258
+
259
+ return down_proj
260
+
261
+
262
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
263
+ """
264
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
265
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
266
+ """
267
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
268
+ if n_rep == 1:
269
+ return hidden_states
270
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
271
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
272
+
273
+
274
+ class InternLM2Attention(nn.Module):
275
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
276
+
277
+ def __init__(self, config: InternLM2Config):
278
+ super().__init__()
279
+ self.config = config
280
+ self.hidden_size = config.hidden_size
281
+ self.num_heads = config.num_attention_heads
282
+ self.head_dim = self.hidden_size // self.num_heads
283
+ self.num_key_value_heads = config.num_key_value_heads
284
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
285
+ self.max_position_embeddings = config.max_position_embeddings
286
+ self.is_causal = True
287
+
288
+ if (self.head_dim * self.num_heads) != self.hidden_size:
289
+ raise ValueError(
290
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
291
+ f" and `num_heads`: {self.num_heads})."
292
+ )
293
+
294
+ #self.wqkv = nn.Linear(
295
+ self.wqkv = PLoRA(
296
+ self.hidden_size,
297
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
298
+ bias=config.bias,
299
+ lora_r=256, lora_alpha=256, lora_len=256
300
+ )
301
+
302
+ #self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
303
+ self.wo = PLoRA(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias,
304
+ lora_r=256, lora_alpha=256, lora_len=256)
305
+ self._init_rope()
306
+
307
+ def _init_rope(self):
308
+ if self.config.rope_scaling is None:
309
+ self.rotary_emb = InternLM2RotaryEmbedding(
310
+ self.head_dim,
311
+ max_position_embeddings=self.max_position_embeddings,
312
+ base=self.config.rope_theta,
313
+ )
314
+ else:
315
+ scaling_type = self.config.rope_scaling["type"]
316
+ scaling_factor = self.config.rope_scaling["factor"]
317
+ if scaling_type == "dynamic":
318
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
319
+ self.head_dim,
320
+ max_position_embeddings=self.max_position_embeddings,
321
+ base=self.config.rope_theta,
322
+ scaling_factor=scaling_factor
323
+ )
324
+ else:
325
+ raise ValueError("Currently we only support rotary embedding's type being 'dynamic'.")
326
+ return self.rotary_emb
327
+
328
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
329
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
330
+
331
+ def forward(
332
+ self,
333
+ hidden_states: torch.Tensor,
334
+ attention_mask: Optional[torch.Tensor] = None,
335
+ position_ids: Optional[torch.LongTensor] = None,
336
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
337
+ output_attentions: bool = False,
338
+ use_cache: bool = False,
339
+ im_mask: Optional[Tuple[torch.Tensor]] = None,
340
+ **kwargs,
341
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
342
+ if "padding_mask" in kwargs:
343
+ warnings.warn(
344
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
345
+ "Please make sure use `attention_mask` instead.`"
346
+ )
347
+
348
+ bsz, q_len, _ = hidden_states.size()
349
+
350
+ qkv_states = self.wqkv(hidden_states, im_mask)
351
+
352
+ qkv_states = rearrange(
353
+ qkv_states,
354
+ "b q (h gs d) -> b q h gs d",
355
+ gs=2 + self.num_key_value_groups,
356
+ d=self.head_dim,
357
+ )
358
+
359
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
360
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
361
+ key_states = qkv_states[..., -2, :]
362
+ value_states = qkv_states[..., -1, :]
363
+
364
+ query_states = query_states.transpose(1, 2)
365
+ key_states = key_states.transpose(1, 2)
366
+ value_states = value_states.transpose(1, 2)
367
+
368
+ kv_seq_len = key_states.shape[-2]
369
+ if past_key_value is not None:
370
+ kv_seq_len += past_key_value[0].shape[-2]
371
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
372
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
373
+
374
+ if past_key_value is not None:
375
+ # reuse k, v, self_attention
376
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
377
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
378
+
379
+ past_key_value = (key_states, value_states) if use_cache else None
380
+
381
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
382
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
383
+
384
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
385
+
386
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
387
+ raise ValueError(
388
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
389
+ f" {attn_weights.size()}"
390
+ )
391
+
392
+ if attention_mask is not None:
393
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
394
+ raise ValueError(
395
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
396
+ )
397
+ attn_weights = attn_weights + attention_mask
398
+
399
+ # upcast attention to fp32
400
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
401
+ attn_output = torch.matmul(attn_weights, value_states)
402
+
403
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
404
+ raise ValueError(
405
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
406
+ f" {attn_output.size()}"
407
+ )
408
+
409
+ attn_output = attn_output.transpose(1, 2).contiguous()
410
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
411
+
412
+ attn_output = self.wo(attn_output, im_mask)
413
+
414
+ if not output_attentions:
415
+ attn_weights = None
416
+
417
+ return attn_output, attn_weights, past_key_value
418
+
419
+
420
+ class InternLM2FlashAttention2(InternLM2Attention):
421
+ """
422
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
423
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
424
+ flash attention and deal with padding tokens in case the input contains any of them.
425
+ """
426
+
427
+ def forward(
428
+ self,
429
+ hidden_states: torch.Tensor,
430
+ attention_mask: Optional[torch.LongTensor] = None,
431
+ position_ids: Optional[torch.LongTensor] = None,
432
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
433
+ output_attentions: bool = False,
434
+ use_cache: bool = False,
435
+ im_mask: Optional[Tuple[torch.Tensor]] = None,
436
+ **kwargs,
437
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
438
+ # InternLM2FlashAttention2 attention does not support output_attentions
439
+ if "padding_mask" in kwargs:
440
+ warnings.warn(
441
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. "
442
+ "Please make sure use `attention_mask` instead.`"
443
+ )
444
+
445
+ # overwrite attention_mask with padding_mask
446
+ attention_mask = kwargs.pop("padding_mask")
447
+
448
+ output_attentions = False
449
+
450
+ bsz, q_len, _ = hidden_states.size()
451
+
452
+ qkv_states = self.wqkv(hidden_states, im_mask)
453
+
454
+ qkv_states = rearrange(
455
+ qkv_states,
456
+ "b q (h gs d) -> b q h gs d",
457
+ gs=self.num_heads + 2 * self.num_key_value_heads,
458
+ d=self.head_dim,
459
+ q=q_len,
460
+ )
461
+
462
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
463
+ query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
464
+ key_states = qkv_states[..., -2, :]
465
+ value_states = qkv_states[..., -1, :]
466
+
467
+ kv_seq_len = key_states.shape[-2]
468
+ if past_key_value is not None:
469
+ kv_seq_len += past_key_value[0].shape[-2]
470
+
471
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
472
+
473
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
474
+
475
+ if past_key_value is not None:
476
+ # reuse k, v, self_attention
477
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
478
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
479
+
480
+ past_key_value = (key_states, value_states) if use_cache else None
481
+
482
+ query_states = query_states.transpose(1, 2)
483
+ key_states = key_states.transpose(1, 2)
484
+ value_states = value_states.transpose(1, 2)
485
+
486
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
487
+
488
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
489
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
490
+ # cast them back in the correct dtype just to be sure everything works as expected.
491
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
492
+ # in fp32. (InternLM2RMSNorm handles it correctly)
493
+
494
+ input_dtype = query_states.dtype
495
+ if input_dtype == torch.float32:
496
+ # Handle the case where the model is quantized
497
+ if hasattr(self.config, "_pre_quantization_dtype"):
498
+ target_dtype = self.config._pre_quantization_dtype
499
+ else:
500
+ target_dtype = self.q_proj.weight.dtype
501
+
502
+ logger.warning_once(
503
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
504
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back "
505
+ f"the input in {target_dtype}."
506
+ )
507
+
508
+ query_states = query_states.to(target_dtype)
509
+ key_states = key_states.to(target_dtype)
510
+ value_states = value_states.to(target_dtype)
511
+
512
+ attn_output = self._flash_attention_forward(
513
+ query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
514
+ )
515
+
516
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
517
+ attn_output = self.wo(attn_output, im_mask)
518
+
519
+ if not output_attentions:
520
+ attn_weights = None
521
+
522
+ return attn_output, attn_weights, past_key_value


class InternLM2DecoderLayer(nn.Module):
    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.attention = (
            InternLM2Attention(config=config)
            if not getattr(config, "_flash_attn_2_enabled", False)
            else InternLM2FlashAttention2(config=config)
        )
        self.feed_forward = InternLM2MLP(config)
        self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        im_mask: Optional[Tuple[torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. "
                "Please make sure to use `attention_mask` instead."
            )

        residual = hidden_states

        hidden_states = self.attention_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.attention(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            im_mask=im_mask,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.ffn_norm(hidden_states)
        hidden_states = self.feed_forward(hidden_states, im_mask)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
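The layer follows the pre-norm residual pattern: normalize, transform, then add the untouched input back, once for attention and once for the feed-forward. A stripped-down sketch (not part of the upload) of that control flow, with stand-in linear modules in place of the real attention and MLP:

```python
import torch
from torch import nn

class PreNormBlock(nn.Module):
    # stand-in for InternLM2DecoderLayer: `mixer` plays the attention,
    # `mlp` the feed-forward, each behind its own normalization
    def __init__(self, dim):
        super().__init__()
        self.attention_norm = nn.LayerNorm(dim)
        self.ffn_norm = nn.LayerNorm(dim)
        self.mixer = nn.Linear(dim, dim)
        self.mlp = nn.Linear(dim, dim)

    def forward(self, x):
        x = x + self.mixer(self.attention_norm(x))  # residual 1: attention
        x = x + self.mlp(self.ffn_norm(x))          # residual 2: feed-forward
        return x

print(PreNormBlock(8)(torch.randn(2, 4, 8)).shape)  # torch.Size([2, 4, 8])
```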


InternLM2_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`InternLM2Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
    InternLM2_START_DOCSTRING,
)
class InternLM2PreTrainedModel(PreTrainedModel):
    config_class = InternLM2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["InternLM2DecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


InternLM2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
            when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and value states in the self-attention blocks) that can be used
            (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
    InternLM2_START_DOCSTRING,
)
class InternLM2Model(InternLM2PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]

    Args:
        config: InternLM2Config
    """

    _auto_class = "AutoModel"

    def __init__(self, config: InternLM2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.tok_embeddings

    def set_input_embeddings(self, value):
        self.tok_embeddings = value

    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask
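The combination above is additive: both the causal part and the padding part are expressed as large negative biases and summed. A minimal sketch (not part of the upload) of the same idea in plain torch, standing in for the library's `_make_causal_mask`/`_expand_mask` helpers:

```python
import torch

seq_len, min_val = 4, torch.finfo(torch.float32).min

# Causal part: position i may only attend to positions <= i.
causal = torch.full((seq_len, seq_len), min_val).triu(diagonal=1)

# Padding part: mask out the last token of the (single) sequence.
padding = torch.tensor([1, 1, 1, 0])        # 1 = keep, 0 = masked
pad_bias = (1 - padding).float() * min_val  # broadcast over query positions

combined = causal + pad_bias  # additive combination, as in the method above
print((combined == 0).int())  # 1s mark the positions attention may use
```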

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs
    ) -> Union[Tuple, BaseModelOutputWithPast]:

        im_mask = kwargs.get('im_mask', None)

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.tok_embeddings(input_ids)
            # text-only input: no positions belong to an image
            im_mask = torch.zeros(inputs_embeds.shape[:2]).to(inputs_embeds.device).bool()

        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None, im_mask)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    im_mask=im_mask,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
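The `create_custom_forward` closure exists because `torch.utils.checkpoint.checkpoint` only re-feeds the positional tensor arguments on the recomputation pass; the non-tensor extras (`output_attentions`, `im_mask`) are frozen into the closure. A minimal, self-contained sketch (not part of the upload) of the same pattern with a stand-in layer; `use_reentrant=False` is an assumption for recent PyTorch versions:

```python
import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Linear(8, 8)

def create_custom_forward(module):
    # the closure freezes any non-tensor extras, mirroring the forward above
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward

x = torch.randn(2, 8, requires_grad=True)
y = checkpoint(create_custom_forward(layer), x, use_reentrant=False)
y.sum().backward()  # activations are recomputed here instead of being stored
print(x.grad.shape)  # torch.Size([2, 8])
```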


class InternLM2ForCausalLM(InternLM2PreTrainedModel):
    _auto_class = "AutoModelForCausalLM"

    _tied_weights_keys = ["output.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLM2Model(config)
        self.vocab_size = config.vocab_size
        self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.debug_flag = 1
        self.tokenizer = None

        self.max_length = config.max_length
        print(f'Set max length to {self.max_length}')
        # Initialize weights and apply final processing
        self.post_init()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, InternLM2Model):
            module.gradient_checkpointing = value
        # if value:
        #     self.vit.vision_tower.vision_model.encoder.gradient_checkpointing = value

    def get_input_embeddings(self):
        return self.model.tok_embeddings

    def set_input_embeddings(self, value):
        self.model.tok_embeddings = value

    def get_output_embeddings(self):
        return self.output

    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def encode_text(self, t, add_special_tokens=False):
        t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
        t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
        t = t.replace('<TOKENS_UNUSED_0>', '[UNUSED_TOKEN_145]')
        t = t.replace('<TOKENS_UNUSED_1>', '[UNUSED_TOKEN_145]')
        t = t.replace('[UNUSED_TOKEN_0]', '[UNUSED_TOKEN_145]')
        t = t.replace('[UNUSED_TOKEN_1]', '[UNUSED_TOKEN_145]')

        text = t
        token = self.tokenizer(text,
                               return_tensors='pt',
                               add_special_tokens=add_special_tokens).input_ids.to(self.device)
        embs = self.model.tok_embeddings(token)
        return embs

    def prompt_wrap(self, img_embeds, prompt):
        batch_size = img_embeds.shape[0]
        p_before, p_after = prompt.split('<ImageHere>')
        p_before_tokens = self.tokenizer(
            p_before, return_tensors="pt", add_special_tokens=True).to(img_embeds.device)

        p_before_embeds = self.model.tok_embeddings(p_before_tokens.input_ids).expand(batch_size, -1, -1)
        wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds], dim=1)

        wrapped_atts_img = torch.ones(wrapped_img_embeds.size()[:-1], dtype=torch.long).to(img_embeds.device)

        # the prompt prefix and the image tokens never receive a loss
        wrapped_target = torch.ones(batch_size, wrapped_img_embeds.shape[1], dtype=torch.long).to(img_embeds.device) * -100

        return wrapped_img_embeds, wrapped_atts_img, wrapped_target
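The wrapping above splits the prompt at `<ImageHere>` and splices the projected image features in after the preceding text embeddings, with matching attention mask and an all-`-100` target span. A shape-level sketch (not part of the upload), with toy embedding sizes assumed for illustration:

```python
import torch

hidden, bsz = 8, 2
p_before_embeds = torch.randn(bsz, 5, hidden)   # embedded text before <ImageHere>
img_embeds = torch.randn(bsz, 3, hidden)        # vision features projected to hidden size

wrapped = torch.cat([p_before_embeds, img_embeds], dim=1)
attn = torch.ones(wrapped.size()[:-1], dtype=torch.long)
target = torch.ones(bsz, wrapped.shape[1], dtype=torch.long) * -100  # no loss on this span

print(wrapped.shape, attn.shape, target.shape)
# torch.Size([2, 8, 8]) torch.Size([2, 8]) torch.Size([2, 8])
```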
    def text2emb(self, text, add_special=False):
        new_text = []
        for t in text:
            t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
            t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
            t = t.replace('<TOKENS_UNUSED_0>', '[UNUSED_TOKEN_145]')
            t = t.replace('<TOKENS_UNUSED_1>', '[UNUSED_TOKEN_145]')
            new_text.append(t)
        text = new_text
        to_regress_tokens = self.tokenizer(
            text,
            return_tensors="pt",
            padding="longest",
            truncation=True,
            max_length=self.max_length,
            add_special_tokens=add_special
        ).to(self.device)

        # targets = self.mask_human_targets(to_regress_tokens.input_ids)
        # targets = targets.to(self.device)
        targets = to_regress_tokens.input_ids.masked_fill(
            to_regress_tokens.input_ids == self.tokenizer.pad_token_id, -100
        ).to(self.device)

        return to_regress_tokens, targets

    def mask_human_targets(self, input_ids, pure=False):
        target_batch = []
        for bs in range(input_ids.shape[0]):
            ids = input_ids[bs]
            targets = copy.deepcopy(ids)
            end_count = 0
            last_eoa = 0
            for i, temp_id in enumerate(ids):
                if temp_id == 92542:
                    if end_count % 2 == 0:
                        targets[last_eoa: i + 6] = -100
                    else:
                        last_eoa = i + 1
                    end_count += 1
                elif temp_id == 2:  # eos and following pad
                    targets[i + 1:] = -100  # loss on eos, but not on pad
                    break
            if temp_id != 2 and end_count % 2 == 0:  # truncation: sequence ends inside the last question
                targets[last_eoa + 1:] = -100  # mask everything after the last answer

            target_batch.append(targets.unsqueeze(0))
            if self.debug_flag and 0:
                print('#### Warning! System meta is not supported yet')
                targets_vis = targets.clone()
                targets_vis[targets_vis == -100] = 92399
                targets_vis_tokens = ''.join(self.tokenizer.convert_ids_to_tokens(targets_vis)).replace('[UNUSED_TOKEN_2]', " ")
                print(''.join(self.tokenizer.convert_ids_to_tokens(ids)))
                print('-----------')
                print([targets_vis_tokens])
                print('-----------------------------')

        target_batch = torch.cat(target_batch, dim=0)
        return target_batch
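The turn masking above alternates on the end-of-turn id 92542: even occurrences close a user turn, whose span (plus the six tokens of chat-template glue that follow it) is masked to `-100`; odd occurrences close an assistant turn, whose labels are kept. A toy, self-contained sketch (not part of the upload) of the same alternating scan, with made-up filler token ids:

```python
import torch

EOT, EOS, IGNORE = 92542, 2, -100  # end-of-turn id, eos id, ignored label

def mask_turns(ids):
    targets = ids.clone()
    end_count, last_eoa = 0, 0
    for i, t in enumerate(ids.tolist()):
        if t == EOT:
            if end_count % 2 == 0:            # closes a user turn: mask it
                targets[last_eoa:i + 6] = IGNORE
            else:                             # closes an assistant turn: keep it
                last_eoa = i + 1
            end_count += 1
        elif t == EOS:
            targets[i + 1:] = IGNORE          # loss on eos, none on padding
            break
    return targets

ids = torch.tensor([10, 11, EOT, 5, 5, 5, 5, 5, 20, 21, EOT, EOS, 0, 0])
print(mask_turns(ids).tolist())
# [-100, -100, -100, -100, -100, -100, -100, -100, 20, 21, 92542, 2, -100, -100]
```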

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, InternLM2ForCausalLM

        >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        samples = kwargs.get('samples', None)
        if samples:
            if self.debug_flag:
                self.debug_flag += 1
                if self.debug_flag > 5:
                    self.debug_flag = 0

            has_img = 'image' in samples.keys()

            ### encode text
            # sp_token = samples["sp_token"]

            text = samples['text_input']
            text = ['<|User|>:' + t for t in text]
            to_regress_tokens, targets = self.text2emb(text, add_special=True)

            to_regress_embeds = self.model.tok_embeddings(to_regress_tokens.input_ids)
            attention_mask = to_regress_tokens.attention_mask

            if has_img:
                ### encode image
                image = samples["image"][0]
                bs = to_regress_embeds.shape[0]
                assert image.shape[0] == bs
                ### combine text and image
                if samples['data_type'][0] != 'nlp':
                    img_embeds, atts_img, img_target = self.img2emb(image)
                    # splice the image right after the BOS token
                    to_regress_embeds = torch.cat([to_regress_embeds[:, :1], img_embeds, to_regress_embeds[:, 1:]], dim=1)
                    attention_mask = torch.cat([attention_mask[:, :1], atts_img, attention_mask[:, 1:]], dim=1)
                    targets = torch.cat([targets[:, :1], img_target, targets[:, 1:]], dim=1)

                    im_len = img_embeds.shape[1]
                    im_mask = torch.zeros(to_regress_embeds.shape[:2]).cuda()
                    im_mask[:, 1:1 + im_len] = 1
                    temp_max_length = self.max_length

                else:
                    # pure-NLP batch: run the image branch on a dummy image so all
                    # parameters still receive gradients, then zero its contribution
                    img_embeds, atts_img, img_target = self.img2emb(torch.zeros(1, 3, self.im_size, self.im_size).to(image.device).to(image.dtype))
                    to_regress_embeds += img_embeds.sum() * 0
                    im_mask = torch.zeros(to_regress_embeds.shape[:2]).cuda()
                    temp_max_length = 2048

            temp_max_length = 2048
            inputs_embeds = to_regress_embeds[:, :temp_max_length]
            attention_mask = attention_mask[:, :temp_max_length]
            targets = targets[:, :temp_max_length]
            # im_mask = im_mask[:, :temp_max_length].bool()
            labels = targets
            if self.debug_flag:
                print(targets.shape, inputs_embeds.shape, attention_mask.shape)
                le = len(samples['text_input'])
                data_type = samples['data_type'][0]
                print(f'DataType: {data_type}. Has Image: {has_img}. Current max length: {self.max_length}, BatchSize is {le}')
                if has_img:
                    print(img_embeds.shape)

        else:
            self.debug_flag = 0
            im_mask = kwargs.get('im_mask', None)
            if im_mask is None and inputs_embeds is not None:
                # assume the standard layout of 256 image tokens right after BOS
                im_mask = torch.zeros(inputs_embeds.shape[:2]).to(inputs_embeds.device)
                im_mask[:, 1:1 + 256] = 1
                im_mask = im_mask.bool()

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.output(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens; `reduction="none"` keeps per-token losses
            # (equivalent to the deprecated `reduce=False` spelling)
            loss_fct = CrossEntropyLoss(reduction="none")
            B, N = shift_logits.shape[:2]
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            mask = shift_labels >= 0
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
            # per-sequence mean over valid tokens, then mean over the batch
            loss = (loss.view(B, N).sum(dim=1) / mask.view(B, N).sum(dim=1)).mean()

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
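The loss above differs from the stock HF causal-LM reduction: instead of one mean over all valid tokens in the batch, it averages within each sequence first and then across sequences, so short samples are not outweighed by long ones. A minimal sketch (not part of the upload) contrasting the two reductions:

```python
import torch
from torch.nn import CrossEntropyLoss

B, N, V = 2, 4, 10
logits = torch.randn(B, N, V)
labels = torch.tensor([[1, 2, -100, -100],   # short sample: 2 valid tokens
                       [3, 4, 5, 6]])        # long sample: 4 valid tokens

# ignore_index=-100 (the default) zeroes the loss at masked positions
per_token = CrossEntropyLoss(reduction="none")(logits.view(-1, V), labels.view(-1)).view(B, N)
mask = (labels >= 0).float()

per_sequence = (per_token.sum(dim=1) / mask.sum(dim=1)).mean()  # as in the forward above
global_mean = per_token.sum() / mask.sum()                      # stock HF behavior
print(per_sequence.item(), global_mean.item())
```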

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, im_mask=None, **kwargs
    ):
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only the final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "im_mask": im_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

    def inference_pretrain(self, question, tokenizer):
        print(question)
        question = f'[UNUSED_TOKEN_146]user\n{question}'
        stop_words_ids = [
            torch.tensor([2]).cuda(),      # '</s>'
            torch.tensor([92542]).cuda(),  # '[UNUSED_TOKEN_145]'
        ]
        stopping_criteria = StoppingCriteriaList(
            [StoppingCriteriaSub(stops=stop_words_ids)])
        result = []
        for i in range(3):
            print(f'------attempt {i}------')
            input_ids = tokenizer(question, return_tensors="pt")["input_ids"]
            eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(["[UNUSED_TOKEN_145]"])[0]]
            with torch.no_grad():
                generate = self.generate(input_ids.cuda(),
                                         do_sample=True,
                                         temperature=1.0,
                                         repetition_penalty=1.005,
                                         max_new_tokens=1000,
                                         top_p=0.8,
                                         top_k=50,
                                         eos_token_id=eos_token_id,
                                         stopping_criteria=stopping_criteria,)
                response = tokenizer.decode(generate[0].tolist(), skip_special_tokens=True)
                print(response[len('[UNUSED_TOKEN_146]user '):-len('[UNUSED_TOKEN_145]\n')])
pytorch_model.bin.index.json ADDED
@@ -0,0 +1,554 @@
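The index below maps every parameter name, including the `Plora_A`/`Plora_B` adapter weights, to the shard file that stores it; `from_pretrained` reads this mapping to fetch only the shards it needs. A minimal sketch (not part of the upload) of consuming such an index by hand with the standard library, assuming the file sits in the working directory:

```python
import json
from collections import Counter

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

print(index["metadata"]["total_size"])          # total bytes across all shards
print(Counter(index["weight_map"].values()))    # tensors per shard file

# A loader opens each shard once and pulls out only the tensors mapped to it:
for shard in sorted(set(index["weight_map"].values())):
    names = [k for k, v in index["weight_map"].items() if v == shard]
    print(shard, len(names))
```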
{
  "metadata": {
    "total_size": 16694026240
  },
  "weight_map": {
    "model.layers.0.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.0.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.1.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.10.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.11.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.12.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.13.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.14.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.15.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.16.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.17.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.18.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.19.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.19.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.19.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.19.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.19.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.19.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.19.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.19.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.2.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.2.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
    "model.layers.20.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.20.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.21.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.22.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.23.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.24.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.25.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
    "model.layers.26.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
341
+ "model.layers.26.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
342
+ "model.layers.26.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
343
+ "model.layers.26.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
344
+ "model.layers.26.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
345
+ "model.layers.26.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
346
+ "model.layers.27.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
347
+ "model.layers.27.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
348
+ "model.layers.27.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
349
+ "model.layers.27.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
350
+ "model.layers.27.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
351
+ "model.layers.27.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
352
+ "model.layers.27.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
353
+ "model.layers.27.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
354
+ "model.layers.27.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
355
+ "model.layers.27.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
356
+ "model.layers.27.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
357
+ "model.layers.27.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
358
+ "model.layers.27.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
359
+ "model.layers.27.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
360
+ "model.layers.27.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
361
+ "model.layers.27.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
362
+ "model.layers.27.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
363
+ "model.layers.28.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
364
+ "model.layers.28.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
365
+ "model.layers.28.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
366
+ "model.layers.28.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
367
+ "model.layers.28.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
368
+ "model.layers.28.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
369
+ "model.layers.28.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
370
+ "model.layers.28.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
371
+ "model.layers.28.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
372
+ "model.layers.28.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
373
+ "model.layers.28.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
374
+ "model.layers.28.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
375
+ "model.layers.28.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
376
+ "model.layers.28.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
377
+ "model.layers.28.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
378
+ "model.layers.28.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
379
+ "model.layers.28.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
380
+ "model.layers.29.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
381
+ "model.layers.29.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
382
+ "model.layers.29.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
383
+ "model.layers.29.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
384
+ "model.layers.29.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
385
+ "model.layers.29.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
386
+ "model.layers.29.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
387
+ "model.layers.29.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
388
+ "model.layers.29.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
389
+ "model.layers.29.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
390
+ "model.layers.29.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
391
+ "model.layers.29.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
392
+ "model.layers.29.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
393
+ "model.layers.29.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
394
+ "model.layers.29.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
395
+ "model.layers.29.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
396
+ "model.layers.29.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
397
+ "model.layers.3.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
398
+ "model.layers.3.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
399
+ "model.layers.3.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
400
+ "model.layers.3.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
401
+ "model.layers.3.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
402
+ "model.layers.3.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
403
+ "model.layers.3.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
404
+ "model.layers.3.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
405
+ "model.layers.3.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
406
+ "model.layers.3.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
407
+ "model.layers.3.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
408
+ "model.layers.3.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
409
+ "model.layers.3.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
410
+ "model.layers.3.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
411
+ "model.layers.3.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
412
+ "model.layers.3.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
413
+ "model.layers.3.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
414
+ "model.layers.30.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
415
+ "model.layers.30.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
416
+ "model.layers.30.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
417
+ "model.layers.30.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
418
+ "model.layers.30.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
419
+ "model.layers.30.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
420
+ "model.layers.30.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
421
+ "model.layers.30.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
422
+ "model.layers.30.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
423
+ "model.layers.30.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
424
+ "model.layers.30.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
425
+ "model.layers.30.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
426
+ "model.layers.30.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
427
+ "model.layers.30.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
428
+ "model.layers.30.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
429
+ "model.layers.30.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
430
+ "model.layers.30.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
431
+ "model.layers.31.attention.wo.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
432
+ "model.layers.31.attention.wo.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
433
+ "model.layers.31.attention.wo.weight": "pytorch_model-00002-of-00002.bin",
434
+ "model.layers.31.attention.wqkv.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
435
+ "model.layers.31.attention.wqkv.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
436
+ "model.layers.31.attention.wqkv.weight": "pytorch_model-00002-of-00002.bin",
437
+ "model.layers.31.attention_norm.weight": "pytorch_model-00002-of-00002.bin",
438
+ "model.layers.31.feed_forward.w1.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
439
+ "model.layers.31.feed_forward.w1.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
440
+ "model.layers.31.feed_forward.w1.weight": "pytorch_model-00002-of-00002.bin",
441
+ "model.layers.31.feed_forward.w2.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
442
+ "model.layers.31.feed_forward.w2.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
443
+ "model.layers.31.feed_forward.w2.weight": "pytorch_model-00002-of-00002.bin",
444
+ "model.layers.31.feed_forward.w3.Plora_A.weight": "pytorch_model-00002-of-00002.bin",
445
+ "model.layers.31.feed_forward.w3.Plora_B.weight": "pytorch_model-00002-of-00002.bin",
446
+ "model.layers.31.feed_forward.w3.weight": "pytorch_model-00002-of-00002.bin",
447
+ "model.layers.31.ffn_norm.weight": "pytorch_model-00002-of-00002.bin",
448
+ "model.layers.4.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
449
+ "model.layers.4.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
450
+ "model.layers.4.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
451
+ "model.layers.4.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
452
+ "model.layers.4.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
453
+ "model.layers.4.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
454
+ "model.layers.4.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
455
+ "model.layers.4.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
456
+ "model.layers.4.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
457
+ "model.layers.4.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
458
+ "model.layers.4.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
459
+ "model.layers.4.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
460
+ "model.layers.4.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
461
+ "model.layers.4.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
462
+ "model.layers.4.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
463
+ "model.layers.4.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
464
+ "model.layers.4.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
465
+ "model.layers.5.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
466
+ "model.layers.5.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
467
+ "model.layers.5.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
468
+ "model.layers.5.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
469
+ "model.layers.5.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
470
+ "model.layers.5.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
471
+ "model.layers.5.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
472
+ "model.layers.5.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
473
+ "model.layers.5.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
474
+ "model.layers.5.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
475
+ "model.layers.5.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
476
+ "model.layers.5.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
477
+ "model.layers.5.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
478
+ "model.layers.5.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
479
+ "model.layers.5.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
480
+ "model.layers.5.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
481
+ "model.layers.5.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
482
+ "model.layers.6.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
483
+ "model.layers.6.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
484
+ "model.layers.6.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
485
+ "model.layers.6.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
486
+ "model.layers.6.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
487
+ "model.layers.6.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
488
+ "model.layers.6.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
489
+ "model.layers.6.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
490
+ "model.layers.6.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
491
+ "model.layers.6.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
492
+ "model.layers.6.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
493
+ "model.layers.6.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
494
+ "model.layers.6.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
495
+ "model.layers.6.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
496
+ "model.layers.6.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
497
+ "model.layers.6.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
498
+ "model.layers.6.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
499
+ "model.layers.7.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
500
+ "model.layers.7.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
501
+ "model.layers.7.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
502
+ "model.layers.7.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
503
+ "model.layers.7.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
504
+ "model.layers.7.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
505
+ "model.layers.7.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
506
+ "model.layers.7.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
507
+ "model.layers.7.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
508
+ "model.layers.7.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
509
+ "model.layers.7.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
510
+ "model.layers.7.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
511
+ "model.layers.7.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
512
+ "model.layers.7.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
513
+ "model.layers.7.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
514
+ "model.layers.7.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
515
+ "model.layers.7.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
516
+ "model.layers.8.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
517
+ "model.layers.8.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
518
+ "model.layers.8.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
519
+ "model.layers.8.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
520
+ "model.layers.8.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
521
+ "model.layers.8.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
522
+ "model.layers.8.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
523
+ "model.layers.8.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
524
+ "model.layers.8.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
525
+ "model.layers.8.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
526
+ "model.layers.8.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
527
+ "model.layers.8.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
528
+ "model.layers.8.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
529
+ "model.layers.8.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
530
+ "model.layers.8.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
531
+ "model.layers.8.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
532
+ "model.layers.8.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
533
+ "model.layers.9.attention.wo.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
534
+ "model.layers.9.attention.wo.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
535
+ "model.layers.9.attention.wo.weight": "pytorch_model-00001-of-00002.bin",
536
+ "model.layers.9.attention.wqkv.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
537
+ "model.layers.9.attention.wqkv.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
538
+ "model.layers.9.attention.wqkv.weight": "pytorch_model-00001-of-00002.bin",
539
+ "model.layers.9.attention_norm.weight": "pytorch_model-00001-of-00002.bin",
540
+ "model.layers.9.feed_forward.w1.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
541
+ "model.layers.9.feed_forward.w1.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
542
+ "model.layers.9.feed_forward.w1.weight": "pytorch_model-00001-of-00002.bin",
543
+ "model.layers.9.feed_forward.w2.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
544
+ "model.layers.9.feed_forward.w2.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
545
+ "model.layers.9.feed_forward.w2.weight": "pytorch_model-00001-of-00002.bin",
546
+ "model.layers.9.feed_forward.w3.Plora_A.weight": "pytorch_model-00001-of-00002.bin",
547
+ "model.layers.9.feed_forward.w3.Plora_B.weight": "pytorch_model-00001-of-00002.bin",
548
+ "model.layers.9.feed_forward.w3.weight": "pytorch_model-00001-of-00002.bin",
549
+ "model.layers.9.ffn_norm.weight": "pytorch_model-00001-of-00002.bin",
550
+ "model.norm.weight": "pytorch_model-00002-of-00002.bin",
551
+ "model.tok_embeddings.weight": "pytorch_model-00001-of-00002.bin",
552
+ "output.weight": "pytorch_model-00002-of-00002.bin"
553
+ }
554
+ }
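The weight_map above pairs every parameter name with the shard file that stores it. A minimal loading sketch (an assumption-laden illustration, not the library's implementation: it assumes the two .bin shards sit next to pytorch_model.bin.index.json; in practice `transformers.from_pretrained` performs this resolution automatically):

    import json
    import torch

    with open("pytorch_model.bin.index.json") as f:
        index = json.load(f)

    weight_map = index["weight_map"]  # parameter name -> shard filename, as listed above

    # Group parameter names by shard so each .bin file is deserialized only once.
    shards = {}
    for name, shard_file in weight_map.items():
        shards.setdefault(shard_file, []).append(name)

    state_dict = {}
    for shard_file, names in shards.items():
        shard = torch.load(shard_file, map_location="cpu")
        state_dict.update({name: shard[name] for name in names})

    print(f"loaded {len(state_dict)} tensors from {len(shards)} shards")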
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "bos_token": "<s>",
+ "eos_token": "</s>",
+ "pad_token": "</s>",
+ "unk_token": "<unk>"
+ }
tokenization_internlm.py ADDED
@@ -0,0 +1,240 @@
+ # coding=utf-8
+ # Copyright (c) InternLM. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Tokenization classes for InternLM."""
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+ from transformers.tokenization_utils import PreTrainedTokenizer
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {}
+
+
+ class InternLMTokenizer(PreTrainedTokenizer):
+     """
+     Construct an InternLM tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     model_input_names = ["input_ids", "attention_mask"]
+     _auto_class = "AutoTokenizer"
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<s>",
+         eos_token="</s>",
+         pad_token="</s>",
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         decode_with_prefix_space=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.decode_with_prefix_space = decode_with_prefix_space
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+         self._no_prefix_space_tokens = None
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+
+     """ Initialization"""
+
+     @property
+     def no_prefix_space_tokens(self):
+         if self._no_prefix_space_tokens is None:
+             vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
+             self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
+         return self._no_prefix_space_tokens
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size"""
+         return self.sp_model.get_piece_size()
+
+     @property
+     def bos_token_id(self) -> Optional[int]:
+         return self.sp_model.bos_id()
+
+     @property
+     def eos_token_id(self) -> Optional[int]:
+         return self.sp_model.eos_id()
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def _maybe_add_prefix_space(self, tokens, decoded):
+         if tokens and tokens[0] not in self.no_prefix_space_tokens:
+             return " " + decoded
+         else:
+             return decoded
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (strings) into a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for token in tokens:
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         out_string = self.clean_up_tokenization(out_string)
+         out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
+         return out_string[1:]
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple[str]`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         if self.add_bos_token:
+             bos_token_ids = [self.bos_token_id]
+         else:
+             bos_token_ids = []
+
+         output = bos_token_ids + token_ids_0
+
+         if token_ids_1 is not None:
+             output = output + token_ids_1
+
+         if self.add_eos_token:
+             output = output + [self.eos_token_id]
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         if token_ids_1 is None:
+             return [1] + ([0] * len(token_ids_0)) + [1]
+         return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM does not
+         make use of token type ids, therefore a list of zeros is returned.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of zeros.
+         """
+         eos = [self.eos_token_id]
+
+         if token_ids_1 is None:
+             return len(token_ids_0 + eos) * [0]
+         return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
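A minimal usage sketch for the class above (an assumption: tokenizer.model has been downloaded next to the script). With the defaults `add_bos_token=True` and `add_eos_token=False`, encoded inputs start with `<s>` and carry no trailing `</s>`:

    from tokenization_internlm import InternLMTokenizer

    tok = InternLMTokenizer(vocab_file="tokenizer.model")  # local path, assumption
    ids = tok("Hello, InternLM!")["input_ids"]
    print(ids[0] == tok.bos_token_id)   # True: BOS is prepended by default
    print(ids[-1] == tok.eos_token_id)  # False: no EOS with add_eos_token=False
    print(tok.decode(ids, skip_special_tokens=True))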
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
+ size 1477754
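This file is committed as a Git LFS pointer: the oid and size above identify the real SentencePiece model blob that git-lfs (or the Hub download) resolves. A quick integrity check after download (a sketch; the local filename refers to the resolved file, not the pointer):

    import hashlib

    EXPECTED_OID = "f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b"
    EXPECTED_SIZE = 1477754

    with open("tokenizer.model", "rb") as f:  # resolved LFS blob, not the pointer text
        blob = f.read()

    assert len(blob) == EXPECTED_SIZE, "size mismatch"
    assert hashlib.sha256(blob).hexdigest() == EXPECTED_OID, "sha256 mismatch"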
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "auto_map": {
+ "AutoTokenizer": [
+ "tokenization_internlm.InternLMTokenizer",
+ null
+ ]
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "tokenizer_class": "InternLMTokenizer",
+ "unk_token": "<unk>"
+ }
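The `auto_map` entry is what lets `AutoTokenizer` resolve the custom class shipped in this repo. A loading sketch (the repo path is a placeholder; `trust_remote_code=True` is required so the repo's tokenization_internlm.py is allowed to execute):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained(
        "path/to/this/repo",      # local checkout or Hub repo id (placeholder)
        trust_remote_code=True,   # needed to run tokenization_internlm.py
    )
    print(type(tok).__name__)     # InternLMTokenizer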