albertvillanova (HF staff) committed on
Commit
1d543e3
1 Parent(s): 196af1a

Delete loading script

Files changed (1)
  1. patent-classification.py +0 -109
patent-classification.py DELETED
@@ -1,109 +0,0 @@
- import json
- import os
-
- import datasets
- from datasets.tasks import TextClassification
-
- _CITATION = None
-
-
- _DESCRIPTION = """
- Patent Classification Dataset: a classification of Patents (9 classes).
- It contains 9 unbalanced classes, 35k Patents and summaries divided into 3 splits: train (25k), val (5k) and test (5k).
- Data are sampled from "BIGPATENT: A Large-Scale Dataset for Abstractive and Coherent Summarization." by Eva Sharma, Chen Li and Lu Wang
- See: https://aclanthology.org/P19-1212.pdf
- See: https://evasharma.github.io/bigpatent/
- """
-
- _LABELS = [
-     "Human Necessities",
-     "Performing Operations; Transporting",
-     "Chemistry; Metallurgy",
-     "Textiles; Paper",
-     "Fixed Constructions",
-     "Mechanical Engineering; Lightning; Heating; Weapons; Blasting",
-     "Physics",
-     "Electricity",
-     "General tagging of new or cross-sectional technology",
- ]
-
- class PatentClassificationConfig(datasets.BuilderConfig):
-     """BuilderConfig for PatentClassification."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for PatentClassification.
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super(PatentClassificationConfig, self).__init__(**kwargs)
-
-
- class PatentClassificationDataset(datasets.GeneratorBasedBuilder):
-     """PatentClassification Dataset: classification of Patents (9 classes)."""
-
-     _DOWNLOAD_URL = "https://huggingface.co/datasets/ccdv/patent-classification/resolve/main/"
-     _TRAIN_FILE = "train_data.txt"
-     _VAL_FILE = "val_data.txt"
-     _TEST_FILE = "test_data.txt"
-     _LABELS_DICT = {label: i for i, label in enumerate(_LABELS)}
-
-     BUILDER_CONFIGS = [
-         PatentClassificationConfig(
-             name="patent",
-             version=datasets.Version("1.0.0"),
-             description="Patent Classification Dataset: A classification task of Patents (9 classes)",
-         ),
-
-         PatentClassificationConfig(
-             name="abstract",
-             version=datasets.Version("1.0.0"),
-             description="Patent Classification Dataset: A classification task of Patents with abstracts (9 classes)",
-         ),
-     ]
-
-     DEFAULT_CONFIG_NAME = "patent"
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "text": datasets.Value("string"),
-                     "label": datasets.features.ClassLabel(names=_LABELS),
-                 }
-             ),
-             supervised_keys=None,
-             citation=_CITATION,
-             task_templates=[TextClassification(
-                 text_column="text", label_column="label")],
-         )
-
-     def _split_generators(self, dl_manager):
-         train_path = dl_manager.download_and_extract(self._TRAIN_FILE)
-         val_path = dl_manager.download_and_extract(self._VAL_FILE)
-         test_path = dl_manager.download_and_extract(self._TEST_FILE)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Generate PatentClassification examples."""
-         with open(filepath, encoding="utf-8") as f:
-             for id_, row in enumerate(f):
-                 data = json.loads(row)
-                 label = self._LABELS_DICT[data["label"]]
-
-                 if self.config.name == "abstract":
-                     text = data["abstract"]
-                 else:
-                     text = data["description"]
-                 yield id_, {"text": text, "label": label}