albertvillanova HF staff committed on
Commit
b8dd5d7
1 Parent(s): 1cdcf07
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. README.md +263 -134
  2. all_languages/test-00000-of-00001.parquet +3 -0
  3. all_languages/train-00000-of-00004.parquet +3 -0
  4. all_languages/train-00001-of-00004.parquet +3 -0
  5. all_languages/train-00002-of-00004.parquet +3 -0
  6. all_languages/train-00003-of-00004.parquet +3 -0
  7. all_languages/validation-00000-of-00001.parquet +3 -0
  8. ar/test-00000-of-00001.parquet +3 -0
  9. ar/train-00000-of-00001.parquet +3 -0
  10. ar/validation-00000-of-00001.parquet +3 -0
  11. bg/test-00000-of-00001.parquet +3 -0
  12. bg/train-00000-of-00001.parquet +3 -0
  13. bg/validation-00000-of-00001.parquet +3 -0
  14. dataset_infos.json +0 -1
  15. de/test-00000-of-00001.parquet +3 -0
  16. de/train-00000-of-00001.parquet +3 -0
  17. de/validation-00000-of-00001.parquet +3 -0
  18. el/test-00000-of-00001.parquet +3 -0
  19. el/train-00000-of-00001.parquet +3 -0
  20. el/validation-00000-of-00001.parquet +3 -0
  21. en/test-00000-of-00001.parquet +3 -0
  22. en/train-00000-of-00001.parquet +3 -0
  23. en/validation-00000-of-00001.parquet +3 -0
  24. es/test-00000-of-00001.parquet +3 -0
  25. es/train-00000-of-00001.parquet +3 -0
  26. es/validation-00000-of-00001.parquet +3 -0
  27. fr/test-00000-of-00001.parquet +3 -0
  28. fr/train-00000-of-00001.parquet +3 -0
  29. fr/validation-00000-of-00001.parquet +3 -0
  30. hi/test-00000-of-00001.parquet +3 -0
  31. hi/train-00000-of-00001.parquet +3 -0
  32. hi/validation-00000-of-00001.parquet +3 -0
  33. ru/test-00000-of-00001.parquet +3 -0
  34. ru/train-00000-of-00001.parquet +3 -0
  35. ru/validation-00000-of-00001.parquet +3 -0
  36. sw/test-00000-of-00001.parquet +3 -0
  37. sw/train-00000-of-00001.parquet +3 -0
  38. sw/validation-00000-of-00001.parquet +3 -0
  39. th/test-00000-of-00001.parquet +3 -0
  40. th/train-00000-of-00001.parquet +3 -0
  41. th/validation-00000-of-00001.parquet +3 -0
  42. tr/test-00000-of-00001.parquet +3 -0
  43. tr/train-00000-of-00001.parquet +3 -0
  44. tr/validation-00000-of-00001.parquet +3 -0
  45. ur/test-00000-of-00001.parquet +3 -0
  46. ur/train-00000-of-00001.parquet +3 -0
  47. ur/validation-00000-of-00001.parquet +3 -0
  48. vi/test-00000-of-00001.parquet +3 -0
  49. vi/train-00000-of-00001.parquet +3 -0
  50. vi/validation-00000-of-00001.parquet +3 -0
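
The per-language directories above back each dataset configuration with Parquet split files. As a minimal sketch (assuming the standard Hugging Face `datasets` API and that this repository is loaded under the `xnli` id), one of the refactored configs could be loaded like this:

```python
from datasets import load_dataset

# Sketch only (assumed repository id "xnli"): each language directory added in
# this commit backs one config, with train/validation/test Parquet shards.
ds = load_dataset("xnli", "ar")

print(ds)              # DatasetDict with train (392702), validation (2490), test (5010) rows
print(ds["train"][0])  # {'premise': ..., 'hypothesis': ..., 'label': 0 | 1 | 2}
```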
README.md CHANGED
@@ -18,6 +18,66 @@ language:
18
  paperswithcode_id: xnli
19
  pretty_name: Cross-lingual Natural Language Inference
20
  dataset_info:
21
  - config_name: ar
22
  features:
23
  - name: premise
@@ -33,16 +93,16 @@ dataset_info:
33
  '2': contradiction
34
  splits:
35
  - name: train
36
- num_bytes: 107399934
37
  num_examples: 392702
38
  - name: test
39
- num_bytes: 1294561
40
  num_examples: 5010
41
  - name: validation
42
- num_bytes: 633009
43
  num_examples: 2490
44
- download_size: 483963712
45
- dataset_size: 109327504
46
  - config_name: bg
47
  features:
48
  - name: premise
@@ -58,16 +118,16 @@ dataset_info:
58
  '2': contradiction
59
  splits:
60
  - name: train
61
- num_bytes: 125973545
62
  num_examples: 392702
63
  - name: test
64
- num_bytes: 1573042
65
  num_examples: 5010
66
  - name: validation
67
- num_bytes: 774069
68
  num_examples: 2490
69
- download_size: 483963712
70
- dataset_size: 128320656
71
  - config_name: de
72
  features:
73
  - name: premise
@@ -83,16 +143,16 @@ dataset_info:
83
  '2': contradiction
84
  splits:
85
  - name: train
86
- num_bytes: 84684460
87
  num_examples: 392702
88
  - name: test
89
- num_bytes: 996496
90
  num_examples: 5010
91
  - name: validation
92
- num_bytes: 494612
93
  num_examples: 2490
94
- download_size: 483963712
95
- dataset_size: 86175568
96
  - config_name: el
97
  features:
98
  - name: premise
@@ -108,16 +168,16 @@ dataset_info:
108
  '2': contradiction
109
  splits:
110
  - name: train
111
- num_bytes: 139753678
112
  num_examples: 392702
113
  - name: test
114
- num_bytes: 1704793
115
  num_examples: 5010
116
  - name: validation
117
- num_bytes: 841234
118
  num_examples: 2490
119
- download_size: 483963712
120
- dataset_size: 142299705
121
  - config_name: en
122
  features:
123
  - name: premise
@@ -133,16 +193,16 @@ dataset_info:
133
  '2': contradiction
134
  splits:
135
  - name: train
136
- num_bytes: 74444346
137
  num_examples: 392702
138
  - name: test
139
- num_bytes: 875142
140
  num_examples: 5010
141
  - name: validation
142
- num_bytes: 433471
143
  num_examples: 2490
144
- download_size: 483963712
145
- dataset_size: 75752959
146
  - config_name: es
147
  features:
148
  - name: premise
@@ -158,16 +218,16 @@ dataset_info:
158
  '2': contradiction
159
  splits:
160
  - name: train
161
- num_bytes: 81383604
162
  num_examples: 392702
163
  - name: test
164
- num_bytes: 969821
165
  num_examples: 5010
166
  - name: validation
167
- num_bytes: 478430
168
  num_examples: 2490
169
- download_size: 483963712
170
- dataset_size: 82831855
171
  - config_name: fr
172
  features:
173
  - name: premise
@@ -183,16 +243,16 @@ dataset_info:
183
  '2': contradiction
184
  splits:
185
  - name: train
186
- num_bytes: 85809099
187
  num_examples: 392702
188
  - name: test
189
- num_bytes: 1029247
190
  num_examples: 5010
191
  - name: validation
192
- num_bytes: 510112
193
  num_examples: 2490
194
- download_size: 483963712
195
- dataset_size: 87348458
196
  - config_name: hi
197
  features:
198
  - name: premise
@@ -208,16 +268,16 @@ dataset_info:
208
  '2': contradiction
209
  splits:
210
  - name: train
211
- num_bytes: 170594284
212
  num_examples: 392702
213
  - name: test
214
- num_bytes: 2073081
215
  num_examples: 5010
216
  - name: validation
217
- num_bytes: 1023923
218
  num_examples: 2490
219
- download_size: 483963712
220
- dataset_size: 173691288
221
  - config_name: ru
222
  features:
223
  - name: premise
@@ -233,16 +293,16 @@ dataset_info:
233
  '2': contradiction
234
  splits:
235
  - name: train
236
- num_bytes: 129859935
237
  num_examples: 392702
238
  - name: test
239
- num_bytes: 1603474
240
  num_examples: 5010
241
  - name: validation
242
- num_bytes: 786450
243
  num_examples: 2490
244
- download_size: 483963712
245
- dataset_size: 132249859
246
  - config_name: sw
247
  features:
248
  - name: premise
@@ -258,16 +318,16 @@ dataset_info:
258
  '2': contradiction
259
  splits:
260
  - name: train
261
- num_bytes: 69286045
262
  num_examples: 392702
263
  - name: test
264
- num_bytes: 871659
265
  num_examples: 5010
266
  - name: validation
267
- num_bytes: 429858
268
  num_examples: 2490
269
- download_size: 483963712
270
- dataset_size: 70587562
271
  - config_name: th
272
  features:
273
  - name: premise
@@ -283,16 +343,16 @@ dataset_info:
283
  '2': contradiction
284
  splits:
285
  - name: train
286
- num_bytes: 176063212
287
  num_examples: 392702
288
  - name: test
289
- num_bytes: 2147023
290
  num_examples: 5010
291
  - name: validation
292
- num_bytes: 1061168
293
  num_examples: 2490
294
- download_size: 483963712
295
- dataset_size: 179271403
296
  - config_name: tr
297
  features:
298
  - name: premise
@@ -308,16 +368,16 @@ dataset_info:
308
  '2': contradiction
309
  splits:
310
  - name: train
311
- num_bytes: 71637460
312
  num_examples: 392702
313
  - name: test
314
- num_bytes: 934942
315
  num_examples: 5010
316
  - name: validation
317
- num_bytes: 459316
318
  num_examples: 2490
319
- download_size: 483963712
320
- dataset_size: 73031718
321
  - config_name: ur
322
  features:
323
  - name: premise
@@ -333,16 +393,16 @@ dataset_info:
333
  '2': contradiction
334
  splits:
335
  - name: train
336
- num_bytes: 96441806
337
  num_examples: 392702
338
  - name: test
339
- num_bytes: 1416249
340
  num_examples: 5010
341
  - name: validation
342
- num_bytes: 699960
343
  num_examples: 2490
344
- download_size: 483963712
345
- dataset_size: 98558015
346
  - config_name: vi
347
  features:
348
  - name: premise
@@ -358,16 +418,16 @@ dataset_info:
358
  '2': contradiction
359
  splits:
360
  - name: train
361
- num_bytes: 101417750
362
  num_examples: 392702
363
  - name: test
364
- num_bytes: 1190225
365
  num_examples: 5010
366
  - name: validation
367
- num_bytes: 590688
368
  num_examples: 2490
369
- download_size: 483963712
370
- dataset_size: 103198663
371
  - config_name: zh
372
  features:
373
  - name: premise
@@ -383,76 +443,145 @@ dataset_info:
383
  '2': contradiction
384
  splits:
385
  - name: train
386
- num_bytes: 72225161
387
  num_examples: 392702
388
  - name: test
389
- num_bytes: 777937
390
  num_examples: 5010
391
  - name: validation
392
- num_bytes: 384859
393
  num_examples: 2490
394
- download_size: 483963712
395
- dataset_size: 73387957
 
396
  - config_name: all_languages
397
- features:
398
- - name: premise
399
- dtype:
400
- translation:
401
- languages:
402
- - ar
403
- - bg
404
- - de
405
- - el
406
- - en
407
- - es
408
- - fr
409
- - hi
410
- - ru
411
- - sw
412
- - th
413
- - tr
414
- - ur
415
- - vi
416
- - zh
417
- - name: hypothesis
418
- dtype:
419
- translation_variable_languages:
420
- languages:
421
- - ar
422
- - bg
423
- - de
424
- - el
425
- - en
426
- - es
427
- - fr
428
- - hi
429
- - ru
430
- - sw
431
- - th
432
- - tr
433
- - ur
434
- - vi
435
- - zh
436
- num_languages: 15
437
- - name: label
438
- dtype:
439
- class_label:
440
- names:
441
- '0': entailment
442
- '1': neutral
443
- '2': contradiction
444
- splits:
445
- - name: train
446
- num_bytes: 1581474731
447
- num_examples: 392702
448
- - name: test
449
- num_bytes: 19387508
450
- num_examples: 5010
451
- - name: validation
452
- num_bytes: 9566255
453
- num_examples: 2490
454
- download_size: 483963712
455
- dataset_size: 1610428494
456
  ---
457
 
458
  # Dataset Card for "xnli"
 
18
  paperswithcode_id: xnli
19
  pretty_name: Cross-lingual Natural Language Inference
20
  dataset_info:
21
+ - config_name: all_languages
22
+ features:
23
+ - name: premise
24
+ dtype:
25
+ translation:
26
+ languages:
27
+ - ar
28
+ - bg
29
+ - de
30
+ - el
31
+ - en
32
+ - es
33
+ - fr
34
+ - hi
35
+ - ru
36
+ - sw
37
+ - th
38
+ - tr
39
+ - ur
40
+ - vi
41
+ - zh
42
+ - name: hypothesis
43
+ dtype:
44
+ translation_variable_languages:
45
+ languages:
46
+ - ar
47
+ - bg
48
+ - de
49
+ - el
50
+ - en
51
+ - es
52
+ - fr
53
+ - hi
54
+ - ru
55
+ - sw
56
+ - th
57
+ - tr
58
+ - ur
59
+ - vi
60
+ - zh
61
+ num_languages: 15
62
+ - name: label
63
+ dtype:
64
+ class_label:
65
+ names:
66
+ '0': entailment
67
+ '1': neutral
68
+ '2': contradiction
69
+ splits:
70
+ - name: train
71
+ num_bytes: 1581471691
72
+ num_examples: 392702
73
+ - name: test
74
+ num_bytes: 19387432
75
+ num_examples: 5010
76
+ - name: validation
77
+ num_bytes: 9566179
78
+ num_examples: 2490
79
+ download_size: 963942271
80
+ dataset_size: 1610425302
81
  - config_name: ar
82
  features:
83
  - name: premise
 
93
  '2': contradiction
94
  splits:
95
  - name: train
96
+ num_bytes: 107399614
97
  num_examples: 392702
98
  - name: test
99
+ num_bytes: 1294553
100
  num_examples: 5010
101
  - name: validation
102
+ num_bytes: 633001
103
  num_examples: 2490
104
+ download_size: 59215902
105
+ dataset_size: 109327168
106
  - config_name: bg
107
  features:
108
  - name: premise
 
118
  '2': contradiction
119
  splits:
120
  - name: train
121
+ num_bytes: 125973225
122
  num_examples: 392702
123
  - name: test
124
+ num_bytes: 1573034
125
  num_examples: 5010
126
  - name: validation
127
+ num_bytes: 774061
128
  num_examples: 2490
129
+ download_size: 66117878
130
+ dataset_size: 128320320
131
  - config_name: de
132
  features:
133
  - name: premise
 
143
  '2': contradiction
144
  splits:
145
  - name: train
146
+ num_bytes: 84684140
147
  num_examples: 392702
148
  - name: test
149
+ num_bytes: 996488
150
  num_examples: 5010
151
  - name: validation
152
+ num_bytes: 494604
153
  num_examples: 2490
154
+ download_size: 55973883
155
+ dataset_size: 86175232
156
  - config_name: el
157
  features:
158
  - name: premise
 
168
  '2': contradiction
169
  splits:
170
  - name: train
171
+ num_bytes: 139753358
172
  num_examples: 392702
173
  - name: test
174
+ num_bytes: 1704785
175
  num_examples: 5010
176
  - name: validation
177
+ num_bytes: 841226
178
  num_examples: 2490
179
+ download_size: 74551247
180
+ dataset_size: 142299369
181
  - config_name: en
182
  features:
183
  - name: premise
 
193
  '2': contradiction
194
  splits:
195
  - name: train
196
+ num_bytes: 74444026
197
  num_examples: 392702
198
  - name: test
199
+ num_bytes: 875134
200
  num_examples: 5010
201
  - name: validation
202
+ num_bytes: 433463
203
  num_examples: 2490
204
+ download_size: 50627367
205
+ dataset_size: 75752623
206
  - config_name: es
207
  features:
208
  - name: premise
 
218
  '2': contradiction
219
  splits:
220
  - name: train
221
+ num_bytes: 81383284
222
  num_examples: 392702
223
  - name: test
224
+ num_bytes: 969813
225
  num_examples: 5010
226
  - name: validation
227
+ num_bytes: 478422
228
  num_examples: 2490
229
+ download_size: 53677157
230
+ dataset_size: 82831519
231
  - config_name: fr
232
  features:
233
  - name: premise
 
243
  '2': contradiction
244
  splits:
245
  - name: train
246
+ num_bytes: 85808779
247
  num_examples: 392702
248
  - name: test
249
+ num_bytes: 1029239
250
  num_examples: 5010
251
  - name: validation
252
+ num_bytes: 510104
253
  num_examples: 2490
254
+ download_size: 55968680
255
+ dataset_size: 87348122
256
  - config_name: hi
257
  features:
258
  - name: premise
 
268
  '2': contradiction
269
  splits:
270
  - name: train
271
+ num_bytes: 170593964
272
  num_examples: 392702
273
  - name: test
274
+ num_bytes: 2073073
275
  num_examples: 5010
276
  - name: validation
277
+ num_bytes: 1023915
278
  num_examples: 2490
279
+ download_size: 70908548
280
+ dataset_size: 173690952
281
  - config_name: ru
282
  features:
283
  - name: premise
 
293
  '2': contradiction
294
  splits:
295
  - name: train
296
+ num_bytes: 129859615
297
  num_examples: 392702
298
  - name: test
299
+ num_bytes: 1603466
300
  num_examples: 5010
301
  - name: validation
302
+ num_bytes: 786442
303
  num_examples: 2490
304
+ download_size: 70702606
305
+ dataset_size: 132249523
306
  - config_name: sw
307
  features:
308
  - name: premise
 
318
  '2': contradiction
319
  splits:
320
  - name: train
321
+ num_bytes: 69285725
322
  num_examples: 392702
323
  - name: test
324
+ num_bytes: 871651
325
  num_examples: 5010
326
  - name: validation
327
+ num_bytes: 429850
328
  num_examples: 2490
329
+ download_size: 45564152
330
+ dataset_size: 70587226
331
  - config_name: th
332
  features:
333
  - name: premise
 
343
  '2': contradiction
344
  splits:
345
  - name: train
346
+ num_bytes: 176062892
347
  num_examples: 392702
348
  - name: test
349
+ num_bytes: 2147015
350
  num_examples: 5010
351
  - name: validation
352
+ num_bytes: 1061160
353
  num_examples: 2490
354
+ download_size: 77222045
355
+ dataset_size: 179271067
356
  - config_name: tr
357
  features:
358
  - name: premise
 
368
  '2': contradiction
369
  splits:
370
  - name: train
371
+ num_bytes: 71637140
372
  num_examples: 392702
373
  - name: test
374
+ num_bytes: 934934
375
  num_examples: 5010
376
  - name: validation
377
+ num_bytes: 459308
378
  num_examples: 2490
379
+ download_size: 48509680
380
+ dataset_size: 73031382
381
  - config_name: ur
382
  features:
383
  - name: premise
 
393
  '2': contradiction
394
  splits:
395
  - name: train
396
+ num_bytes: 96441486
397
  num_examples: 392702
398
  - name: test
399
+ num_bytes: 1416241
400
  num_examples: 5010
401
  - name: validation
402
+ num_bytes: 699952
403
  num_examples: 2490
404
+ download_size: 46682785
405
+ dataset_size: 98557679
406
  - config_name: vi
407
  features:
408
  - name: premise
 
418
  '2': contradiction
419
  splits:
420
  - name: train
421
+ num_bytes: 101417430
422
  num_examples: 392702
423
  - name: test
424
+ num_bytes: 1190217
425
  num_examples: 5010
426
  - name: validation
427
+ num_bytes: 590680
428
  num_examples: 2490
429
+ download_size: 57690058
430
+ dataset_size: 103198327
431
  - config_name: zh
432
  features:
433
  - name: premise
 
443
  '2': contradiction
444
  splits:
445
  - name: train
446
+ num_bytes: 72224841
447
  num_examples: 392702
448
  - name: test
449
+ num_bytes: 777929
450
  num_examples: 5010
451
  - name: validation
452
+ num_bytes: 384851
453
  num_examples: 2490
454
+ download_size: 48269855
455
+ dataset_size: 73387621
456
+ configs:
457
  - config_name: all_languages
458
+ data_files:
459
+ - split: train
460
+ path: all_languages/train-*
461
+ - split: test
462
+ path: all_languages/test-*
463
+ - split: validation
464
+ path: all_languages/validation-*
465
+ - config_name: ar
466
+ data_files:
467
+ - split: train
468
+ path: ar/train-*
469
+ - split: test
470
+ path: ar/test-*
471
+ - split: validation
472
+ path: ar/validation-*
473
+ - config_name: bg
474
+ data_files:
475
+ - split: train
476
+ path: bg/train-*
477
+ - split: test
478
+ path: bg/test-*
479
+ - split: validation
480
+ path: bg/validation-*
481
+ - config_name: de
482
+ data_files:
483
+ - split: train
484
+ path: de/train-*
485
+ - split: test
486
+ path: de/test-*
487
+ - split: validation
488
+ path: de/validation-*
489
+ - config_name: el
490
+ data_files:
491
+ - split: train
492
+ path: el/train-*
493
+ - split: test
494
+ path: el/test-*
495
+ - split: validation
496
+ path: el/validation-*
497
+ - config_name: en
498
+ data_files:
499
+ - split: train
500
+ path: en/train-*
501
+ - split: test
502
+ path: en/test-*
503
+ - split: validation
504
+ path: en/validation-*
505
+ - config_name: es
506
+ data_files:
507
+ - split: train
508
+ path: es/train-*
509
+ - split: test
510
+ path: es/test-*
511
+ - split: validation
512
+ path: es/validation-*
513
+ - config_name: fr
514
+ data_files:
515
+ - split: train
516
+ path: fr/train-*
517
+ - split: test
518
+ path: fr/test-*
519
+ - split: validation
520
+ path: fr/validation-*
521
+ - config_name: hi
522
+ data_files:
523
+ - split: train
524
+ path: hi/train-*
525
+ - split: test
526
+ path: hi/test-*
527
+ - split: validation
528
+ path: hi/validation-*
529
+ - config_name: ru
530
+ data_files:
531
+ - split: train
532
+ path: ru/train-*
533
+ - split: test
534
+ path: ru/test-*
535
+ - split: validation
536
+ path: ru/validation-*
537
+ - config_name: sw
538
+ data_files:
539
+ - split: train
540
+ path: sw/train-*
541
+ - split: test
542
+ path: sw/test-*
543
+ - split: validation
544
+ path: sw/validation-*
545
+ - config_name: th
546
+ data_files:
547
+ - split: train
548
+ path: th/train-*
549
+ - split: test
550
+ path: th/test-*
551
+ - split: validation
552
+ path: th/validation-*
553
+ - config_name: tr
554
+ data_files:
555
+ - split: train
556
+ path: tr/train-*
557
+ - split: test
558
+ path: tr/test-*
559
+ - split: validation
560
+ path: tr/validation-*
561
+ - config_name: ur
562
+ data_files:
563
+ - split: train
564
+ path: ur/train-*
565
+ - split: test
566
+ path: ur/test-*
567
+ - split: validation
568
+ path: ur/validation-*
569
+ - config_name: vi
570
+ data_files:
571
+ - split: train
572
+ path: vi/train-*
573
+ - split: test
574
+ path: vi/test-*
575
+ - split: validation
576
+ path: vi/validation-*
577
+ - config_name: zh
578
+ data_files:
579
+ - split: train
580
+ path: zh/train-*
581
+ - split: test
582
+ path: zh/test-*
583
+ - split: validation
584
+ path: zh/validation-*
585
  ---
586
 
587
  # Dataset Card for "xnli"
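
For the `all_languages` config described in the YAML above, `premise` is a `Translation` feature over the 15 listed languages, `hypothesis` is a `TranslationVariableLanguages` feature, and `label` is a three-way `ClassLabel`. A hedged sketch of what a decoded row might look like, assuming the standard `datasets` behaviour for these feature types:

```python
from datasets import load_dataset

# Sketch only: load the combined config and inspect one validation example.
ds = load_dataset("xnli", "all_languages", split="validation")
row = ds[0]

# Translation feature: dict mapping each language code to the premise text.
print(row["premise"]["en"])

# TranslationVariableLanguages feature: parallel lists of codes and texts.
hyp = dict(zip(row["hypothesis"]["language"], row["hypothesis"]["translation"]))
print(hyp["en"])

# ClassLabel: map the integer label back to entailment/neutral/contradiction.
print(ds.features["label"].int2str(row["label"]))
```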
all_languages/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:599e3a0191403f19cbe802afdf69841152000b41eaed725e4f463d432c0ffb49
3
+ size 6769722
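
Each of these added `.parquet` entries is a Git LFS pointer rather than the data itself: the pointer records the spec version, the SHA-256 `oid` of the real file, and its byte size. As an illustrative sketch (the local path is hypothetical), a downloaded shard can be checked against its pointer:

```python
import hashlib
from pathlib import Path

# Illustrative check: an LFS pointer stores the SHA-256 ("oid") and byte size
# of the actual file, which we can verify for a locally downloaded shard.
def matches_pointer(path: str, oid: str, size: int) -> bool:
    data = Path(path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# Values copied from the all_languages test shard above; the path is assumed.
print(matches_pointer(
    "all_languages/test-00000-of-00001.parquet",
    "599e3a0191403f19cbe802afdf69841152000b41eaed725e4f463d432c0ffb49",
    6769722,
))
```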
all_languages/train-00000-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:beeb34bbb7779413d78c09f5746baf05b7a79ab37ff063f882f201412c437ddb
3
+ size 237658468
all_languages/train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f3365b09a00408fa6b1de52e14f915decedab864376a179cc5f65937044a2c0
3
+ size 238583683
all_languages/train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:340d804e040607dee5c5b0581ccfd390901255129c4bfb4f81e9e7605b76a0d7
3
+ size 238115767
all_languages/train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d05011ebb98705d0cbc5482fbc555f146dbc4e02890efa28e7774748539f5737
3
+ size 239422128
all_languages/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5e6263b0872a3914c9bc165bfe3883e433aa2066c3fa3b9d142829a9b122518
3
+ size 3392503
ar/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89521231aa5f8404c46fcc5d421a9453819ca48bb99590680fa31fb8c82cf8bd
3
+ size 391980
ar/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a27850bd0e20411f7c08e7c2247413c0050669090ef23cb5263d138be937e89
3
+ size 58630165
ar/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8df098db4682a6a97dc4a08be518b60e58112f0e32df2d4c4c933e34db577cd3
3
+ size 193757
bg/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:333a5e84e09415f6a80dd285c4aa64d164bf721237b3e3d34892ce72066c92c1
3
+ size 447341
bg/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df65135e5813d4a42b3fd8e018ebfaecd981f7c313bbcfd7288e2019f0f4296c
3
+ size 65447048
bg/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfd1a75d8b1c82b97d857aa91cd9bf08e75d1ea58ab43109c2644e82079ac981
3
+ size 223489
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"ar": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ar", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 107399934, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1294561, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 633009, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 109327504, "size_in_bytes": 593291216}, "bg": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "bg", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 125973545, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1573042, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 774069, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 128320656, "size_in_bytes": 612284368}, "de": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "de", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 84684460, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 996496, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 494612, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 86175568, "size_in_bytes": 570139280}, "el": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "el", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 139753678, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1704793, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 841234, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 142299705, "size_in_bytes": 626263417}, "en": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "en", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 74444346, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 875142, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 433471, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 75752959, "size_in_bytes": 559716671}, "es": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "es", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 81383604, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 969821, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 478430, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 82831855, "size_in_bytes": 566795567}, "fr": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "fr", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 85809099, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1029247, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 510112, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 87348458, "size_in_bytes": 571312170}, "hi": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "hi", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 170594284, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 2073081, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 1023923, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 173691288, "size_in_bytes": 657655000}, "ru": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ru", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 129859935, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1603474, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 786450, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 132249859, "size_in_bytes": 616213571}, "sw": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "sw", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 69286045, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 871659, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 429858, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 70587562, "size_in_bytes": 554551274}, "th": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "th", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 176063212, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 2147023, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 1061168, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 179271403, "size_in_bytes": 663235115}, "tr": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "tr", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 71637460, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 934942, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 459316, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 73031718, "size_in_bytes": 556995430}, "ur": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "ur", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 96441806, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1416249, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 699960, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 98558015, "size_in_bytes": 582521727}, "vi": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "vi", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 101417750, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 1190225, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 590688, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 103198663, "size_in_bytes": 587162375}, "zh": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "zh", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 72225161, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 777937, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 384859, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 73387957, "size_in_bytes": 557351669}, "all_languages": {"description": "XNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). 
As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n", "citation": "@InProceedings{conneau2018xnli,\n author = {Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin},\n title = {XNLI: Evaluating Cross-lingual Sentence Representations},\n booktitle = {Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n location = {Brussels, Belgium},\n}", "homepage": "https://www.nyu.edu/projects/bowman/xnli/", "license": "", "features": {"premise": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "id": null, "_type": "Translation"}, "hypothesis": {"languages": ["ar", "bg", "de", "el", "en", "es", "fr", "hi", "ru", "sw", "th", "tr", "ur", "vi", "zh"], "num_languages": 15, "id": null, "_type": "TranslationVariableLanguages"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "xnli", "config_name": "all_languages", "version": {"version_str": "1.1.0", "description": "", "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1581474731, "num_examples": 392702, "dataset_name": "xnli"}, "test": {"name": "test", "num_bytes": 19387508, "num_examples": 5010, "dataset_name": "xnli"}, "validation": {"name": "validation", "num_bytes": 9566255, "num_examples": 2490, "dataset_name": "xnli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/XNLI/XNLI-MT-1.0.zip": {"num_bytes": 466098360, "checksum": "f732517ba2fb1d550e9f3c2aabaef6017c91ee2dcec90e878f138764d224db05"}, "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip": {"num_bytes": 17865352, "checksum": "4ba1d5e1afdb7161f0f23c66dc787802ccfa8a25a3ddd3b165a35e50df346ab1"}}, "download_size": 483963712, "post_processing_size": null, "dataset_size": 1610428494, "size_in_bytes": 2094392206}}
 
 
de/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c25f5695d21673dbe966bac7b3c6a1e2df369431976928565c3cc56838f5632b
+ size 356132
de/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72b2d2adf469509cc6917213feb9ff17bc1919889e3f0dbe848e01660602ec7b
+ size 55436761
de/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68f6b30f13ba0fb92acf2567edd7cb1ec26a6b8b38c2274eb6f140fb7075f66e
+ size 180990
el/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:971110ccb25026cd75d8f8f0e0444877f8e9da7425b55be5fb97de74f9276e5b
+ size 489529
el/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2663e218de5250a1cc62c32121807175a4453a4102b1e99cb13d20399d911d87
+ size 73814854
el/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d173137cc27825fb0cb1dea81310141a5206692d78c150e28d23dac6f3f30e8e
+ size 246864
en/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f0fd1e105091e0f11cd37f9b2bc382f16de9949aa9471e1366b2605ba037167
+ size 308237
en/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6be63cec9ec932a5790de82caee7ee4d00e5a50ef19b79f23b740732da143424
+ size 50161923
en/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42961a7b66dee147da655d78db8cbaac97b3dbcf898009acffec6cda2d80f2bf
+ size 157207
es/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0159c9a734d7d0683d20a9850adcf3e214a49179b6517fea5868744bfd99886
+ size 341925
es/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f609c655a0c200168235609e7709bcf3ac48a465b4c6e90380093862a53a3a2a
+ size 53162139
es/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1913c1256f5b8761fba3f734e260d0b5e228a02ee31a8651ee915261cdf7c631
+ size 173093
fr/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bfce918feee19f8070106963281e46ae801d03583dd0786b443c2f200607d62
+ size 360233
fr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e05c0cac22eba1d8c00ea0558b225af5d741671066abc9bcbc98584930f14b90
+ size 55425053
fr/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f97d84263ac129e54cdeec7bde66b3da9f8b1ba42cfabd0d0b3c5ac7f442cc2
+ size 183394
hi/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e946c50370a8fcc8cc58adb74a2625e5377f55c3f4ba821fd9d305c8d4b14c4e
+ size 492625
hi/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4fab2db1b6c29e0ad9b238f0f12f709f878986f8e074379821efd120e13f467
+ size 70167282
hi/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4cdc8ab6ab3a086dd7bc624b531a518e25352eb2907fb335643c7545e994ee0
+ size 248641
ru/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d939fb4f7f2b61a863fb85dde9f6ef0221dde161fb86170fb10c27ea7d9d4d0
+ size 477352
ru/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5db75fb1e1646d7a39dda45cdcd9b7ba95cfa1f2af0009151e8db34c22ca6f5a
+ size 69986524
ru/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a75ecd12d4145e2eefe22c08cb4ae4b966d18ce7452bdf26ee04f2d4c499078
+ size 238730
sw/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b0bfad4e885a468752a5f17e4c620ac75f64902a2cef2da23f1c1e5ac973a29
+ size 312255
sw/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3282ea435eda702e713b958adcc1f053f415f9f57704b21cc441e1d86b834f52
+ size 45093826
sw/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3dd236d01144c13829450cfbc12ffcc33ecdcf289a02b4c6a60ca78a557eb80b
+ size 158071
th/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45ca847893c31a98757d0aedcffacdcca8246922d5c3782c48c735db7e6137b1
+ size 503402
th/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec65ff12beef95f3ee4f59aa115252656d81b41ce4b39816760f88948d138dbc
+ size 76466353
th/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09abadf254309e8c145777854f95c15024ab6fe29a630f7cd5aec640bffca254
+ size 252290
tr/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1cffb8f840d8862ab11f4727b975d408bf6f3370585cbb96733b5456902b89a
+ size 338133
tr/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30d0b154e3c8b2a9eb7f91aeeb29c4285c96550c42aa5ce5b4d6523be2cbafa5
+ size 47999788
tr/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e98b0590ff9ef4464621690f620f463288c6db079eb6210d84c9ec175f04607
+ size 171759
ur/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:780b0b46fe8234fc9d478978d742697b82bd43cb7f34a63d2993e8a65c54352b
+ size 427737
ur/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0d6d2105e1433a93096cdc61892c96f93e82e48d6495d1281e656212049e3dc
+ size 46038912
ur/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a317147da65a9c9eadbae0328068d7e8f3f7874e8215fba31e2ca2c2f55fbabd
+ size 216136
vi/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34918143e8de1172147abd6fd78f369d6b6b6c2f5a016c2a4febc10190803fb1
+ size 364126
vi/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0eefcc40a026d25745a8edc6080ab8200a2307f974f333af32552c323a3dddc
+ size 57140047
vi/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff6d6aa869a554c38e43be21be8e880e3422cbccc7cc04bd0544ff2931af6f82
+ size 185885
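
Each of the Parquet shards added above is tracked with Git LFS, so the diff only records the pointer file: the LFS spec version, the blob's SHA-256 (`oid`), and its byte size. A minimal sketch, assuming the `huggingface_hub` `hf://` filesystem and a bare `xnli` repo id (both assumptions), of reading one shard directly and comparing it with its pointer:

```python
import pandas as pd
from huggingface_hub import HfFileSystem

fs = HfFileSystem()
path = "datasets/xnli/de/test-00000-of-00001.parquet"

# The size reported by the Hub should match the `size` field of the
# LFS pointer shown in the diff (356132 bytes for this shard).
print(fs.info(path)["size"])

# pandas can read the shard straight from the Hub via the hf:// protocol.
df = pd.read_parquet("hf://" + path)
print(df.shape)               # (5010, 3): premise, hypothesis, label
print(df.columns.tolist())
```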