parquet-converter committed
Commit 6c05784
1 Parent(s): f710c4c

Update parquet files

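For orientation, a minimal sketch of reading the converted splits after this commit, assuming the files have been downloaded locally at the relative paths shown in the renames below (pandas with a parquet engine such as pyarrow installed; paths are taken from this diff, not verified against the live repository):

import pandas as pd

# Paths match the renamed parquet files in this commit.
train = pd.read_parquet("KLUE Machine Reading Comprehension/klue-mrc-train.parquet")
validation = pd.read_parquet("KLUE Machine Reading Comprehension/klue-mrc-validation.parquet")
print(train.shape, validation.shape)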
.DS_Store DELETED
Binary file (6.15 kB)
 
.gitattributes DELETED
@@ -1,29 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- klue-mrc-v1.1_dev.json filter=lfs diff=lfs merge=lfs -text
- klue-mrc-v1.1_train.json filter=lfs diff=lfs merge=lfs -text
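These deleted patterns routed matching paths through git-lfs. A rough Python sketch of checking a filename against a few of them (fnmatch only approximates gitattributes glob semantics; the patterns are copied from the deleted file):

import fnmatch

# A few of the patterns from the deleted .gitattributes.
lfs_patterns = ["*.parquet", "*.bin", "klue-mrc-v1.1_train.json"]

def is_lfs_tracked(filename: str) -> bool:
    return any(fnmatch.fnmatch(filename, pattern) for pattern in lfs_patterns)

print(is_lfs_tracked("klue-mrc-train.parquet"))  # True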
klue-mrc-v1.1_dev.json → KLUE Machine Reading Comprehension/klue-mrc-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b31bb073e47cdeb19f2f1bae03d916ecabf945140334c75842d6994f700d4f47
- size 18712914
+ oid sha256:d5dc367dc8dd2e7c3108808174cbbf288df818b53d7233d60cbc5f0182629110
+ size 19445126
klue-mrc-v1.1_train.json → KLUE Machine Reading Comprehension/klue-mrc-validation.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c52c7b82a6015c09ea8be1fea49c912e73012d8b44d3653256d91c65d519c3df
- size 47952737
+ oid sha256:003aad3a4a4d2da8ef365ab0fd4b123d4efda0195e07d9c35392ae0b4b2f7fe6
+ size 8071653
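Both renamed entries are git-lfs pointer stubs, not the parquet bytes themselves. A minimal sketch of parsing the three-line pointer format shown above (the example values are taken from the new train pointer in this diff):

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value": version, oid sha256:<hex>, size <bytes>.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:d5dc367dc8dd2e7c3108808174cbbf288df818b53d7233d60cbc5f0182629110\n"
    "size 19445126"
)
print(parse_lfs_pointer(pointer))  # {'version': '...', 'oid': 'sha256:...', 'size': '19445126'}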
klue-mrc.py DELETED
@@ -1,101 +0,0 @@
-
- import json
- import pandas as pd
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
- _DESCRIPTION = """\
- Klue Machine Reading Comprehension Data
- """
-
- _URL = "https://huggingface.co/datasets/LeverageX/klue-mrc/resolve/main/"
- _URLS = {
-     "train_data": _URL + "klue-mrc-v1.1_train.json",
-     "validation_data": _URL + "klue-mrc-v1.1_dev.json",
- }
-
- class KoreanNewspaper(datasets.GeneratorBasedBuilder):
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="KLUE Machine Reading Comprehension",
-             version=datasets.Version("1.0.0", ""),
-             description="For LeverageX Project",
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "context": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers": {
-                         "answer_start": datasets.Sequence(datasets.Value("int32")),
-                         "text": datasets.Sequence(datasets.Value("string")),
-                     },
-                     "guid": datasets.Value("string"),
-                 }
-             ),
-             # No default supervised_keys (as we have to pass both question
-             # and context as input).
-             supervised_keys=None,
-             homepage="https://klue-benchmark.com/tasks/70/overview/description",
-         )
-
-     def _split_generators(self, dl_manager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train_data"]}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation_data"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """This function returns the examples in the raw (text) form."""
-         logger.info("generating examples from = %s", filepath)
-         key = 0
-         with open(filepath, encoding="utf-8") as f:
-             data = json.load(f)
-
-         data = data['data']
-
-         for info in data:
-             title = info['title']
-             news_category = info['news_category']
-             source = info['source']
-
-             paragraphs = info['paragraphs']
-
-             if len(paragraphs) == 0:
-                 continue
-
-             context = paragraphs[0]['context']
-             qas = paragraphs[0]['qas']
-
-             for q in qas:
-                 question = q['question']
-
-                 answer_key = 'answers' if len(q['answers']) > 0 else 'plausible_answers'
-                 answer = q[answer_key]
-
-                 answer_text_list = []
-                 answer_start_list = []
-
-                 for ans in answer:
-                     answer_text_list.append(ans['text'])
-                     answer_start_list.append(ans['answer_start'])
-
-                 answer_data = {'text': answer_text_list, 'answer_start': answer_start_list}
-                 guid = q['guid']
-
-                 yield key, {
-                     "guid": guid,
-                     "context": context,
-                     "question": question,
-                     "answers": answer_data,
-                 }
-                 key += 1
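With the splits converted to parquet, a custom builder script like the one deleted above is no longer required: the datasets library can read parquet-backed repositories directly. A minimal sketch, assuming the repository id LeverageX/klue-mrc taken from _URL in the deleted script (not verified against the live Hub):

from datasets import load_dataset

# Parquet-backed repos need no loading script; splits are inferred from the parquet files.
dataset = load_dataset("LeverageX/klue-mrc")
print(dataset)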