tkon3 committed
Commit 10b72be
1 Parent(s): a8cf3f4

first commit

Files changed (5):
  1. .gitattributes +1 -0
  2. patent-classification.py +109 -0
  3. test_data.txt +3 -0
  4. train_data.txt +3 -0
  5. val_data.txt +3 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.txt filter=lfs diff=lfs merge=lfs -text
patent-classification.py ADDED
@@ -0,0 +1,109 @@
+import json
+import os
+
+import datasets
+from datasets.tasks import TextClassification
+
+_CITATION = None
+
+
+_DESCRIPTION = """
+Patent Classification Dataset: a classification of Patents (9 classes).
+It contains 9 slightly unbalanced classes, 35k Patents and summaries divided into 3 splits: train (25k), val (5k) and test (5k).
+Copied from "Long Document Classification From Local Word Glimpses via Recurrent Attention Learning" by Jun He, Liqun Wang, Liu Liu, Jiao Feng and Hao Wu.
+See: https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8675939
+See: https://github.com/LiqunW/Long-document-dataset
+"""
+
+_LABELS = [
+    "Human Necessities",
+    "Performing Operations; Transporting",
+    "Chemistry; Metallurgy",
+    "Textiles; Paper",
+    "Fixed Constructions",
+    "Mechanical Engineering; Lightning; Heating; Weapons; Blasting",
+    "Physics",
+    "Electricity",
+    "General tagging of new or cross-sectional technology",
+]
+
+class PatentClassificationConfig(datasets.BuilderConfig):
+    """BuilderConfig for PatentClassification."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for PatentClassification.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(PatentClassificationConfig, self).__init__(**kwargs)
+
+
+class PatentClassificationDataset(datasets.GeneratorBasedBuilder):
+    """PatentClassification Dataset: classification of Patents (9 classes)."""
+
+    _DOWNLOAD_URL = "https://huggingface.co/datasets/ccdv/patent-classification/resolve/main/"
+    _TRAIN_FILE = "train_data.txt"
+    _VAL_FILE = "val_data.txt"
+    _TEST_FILE = "test_data.txt"
+    _LABELS_DICT = {label: i for i, label in enumerate(_LABELS)}
+
+    BUILDER_CONFIGS = [
+        PatentClassificationConfig(
+            name="patent",
+            version=datasets.Version("1.0.0"),
+            description="Patent Classification Dataset: A classification task of Patents (9 classes)",
+        ),
+
+        PatentClassificationConfig(
+            name="abstract",
+            version=datasets.Version("1.0.0"),
+            description="Patent Classification Dataset: A classification task of Patents with abstracts (9 classes)",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "patent"
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "text": datasets.Value("string"),
+                    "label": datasets.features.ClassLabel(names=_LABELS),
+                }
+            ),
+            supervised_keys=None,
+            citation=_CITATION,
+            task_templates=[TextClassification(
+                text_column="text", label_column="label")],
+        )
+
+    def _split_generators(self, dl_manager):
+        train_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TRAIN_FILE)
+        val_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._VAL_FILE)
+        test_path = dl_manager.download_and_extract(self._DOWNLOAD_URL + self._TEST_FILE)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
+            ),
+        ]
+
+    def _generate_examples(self, filepath):
+        """Generate PatentClassification examples."""
+        with open(filepath, encoding="utf-8") as f:
+            for id_, row in enumerate(f):
+                data = json.loads(row)
+                label = self._LABELS_DICT[data["label"]]
+
+                if self.config.name == "abstract":
+                    text = data["abstract"]
+                else:
+                    text = data["description"]
+                yield id_, {"text": text, "label": label}
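
Once this script is on the Hub, the dataset can be pulled through the standard datasets API. The repository id below comes from _DOWNLOAD_URL in the script; everything else is a minimal usage sketch, not part of this commit. Recent datasets releases may additionally require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# "patent" (the default config) yields full descriptions, "abstract" yields abstracts.
dataset = load_dataset("ccdv/patent-classification", name="abstract")

sample = dataset["train"][0]
label_feature = dataset["train"].features["label"]
print(sample["text"][:200])                                      # first 200 characters of the text
print(sample["label"], label_feature.int2str(sample["label"]))   # class id and its name
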
test_data.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d5e0c38b4fba2b354665550b43ee8495bd6a8da6185cd1ef18dbf5b62bfb065
+size 97819427
train_data.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ee4526f8f5fe2196df595f3302415df52a594da33f9147c1ea156b92992e13d
+size 486602299
val_data.txt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c4a3e43c549469ad7191d74a81a1eebe337edb71376265eafe3d0a72c5eff19
+size 99305673
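
The three .txt files are committed as Git LFS pointers (hence the *.txt rule added to .gitattributes above): only the sha256 oid and byte size of the real JSON-lines data are stored in the repository. As a small sketch, assuming the resolved files have been downloaded locally (the local path is an assumption, not part of the commit), a download can be checked against the recorded digest and size like this:

import hashlib
import os

def verify_lfs_object(path, expected_sha256, expected_size):
    """Compare a local file against the oid/size recorded in its LFS pointer."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256 and os.path.getsize(path) == expected_size

# Values copied from the test_data.txt pointer above; the local path is assumed.
ok = verify_lfs_object(
    "test_data.txt",
    "9d5e0c38b4fba2b354665550b43ee8495bd6a8da6185cd1ef18dbf5b62bfb065",
    97819427,
)
print("test_data.txt OK" if ok else "test_data.txt mismatch")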