init
Browse files- README.md +42 -0
- process.py +24 -0
- t_rex.py +78 -0
README.md
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
language:
|
3 |
+
- en
|
4 |
+
license:
|
5 |
+
- other
|
6 |
+
multilinguality:
|
7 |
+
- monolingual
|
8 |
+
pretty_name: t_rex
|
9 |
+
---
|
10 |
+
|
11 |
+
# Dataset Card for "relbert/t_rex"
|
12 |
+
## Dataset Description
|
13 |
+
- **Repository:** [https://hadyelsahar.github.io/t-rex/](https://hadyelsahar.github.io/t-rex/)
|
14 |
+
- **Paper:** [https://aclanthology.org/L18-1544/](https://aclanthology.org/L18-1544/)
|
15 |
+
- **Dataset:** T-REx
|
16 |
+
|
17 |
+
### Dataset Summary
|
18 |
+
This is the T-REX dataset proposed in [https://aclanthology.org/L18-1544/](https://aclanthology.org/L18-1544/).
|
19 |
+
|
20 |
+
|
21 |
+
## Dataset Structure
|
22 |
+
### Data Instances
|
23 |
+
An example looks as follows.
|
24 |
+
```
|
25 |
+
{
|
26 |
+
"predicate": "released",
|
27 |
+
"object": "December 18, 1954",
|
28 |
+
"subject": "It",
|
29 |
+
"title": "Touché, Pussy Cat!",
|
30 |
+
"text": "Touché, Pussy Cat! is a 1954 one-reel animated Tom and Jerry short, directed by William Hanna and Joseph Barbera, with production by Fred Quimby and music by Scott Bradley. It was nominated for an Academy Award in 1954, the series' final Oscar nomination, while Johann Mouse won the last award for the series a year before. Touché, Pussy Cat! is a follow-up to the 1952 cartoon The Two Mouseketeers, which won the 1951 Academy Award. The title is also Tuffy's catchphrase in the \"Mouseketeer\" shorts. The cartoon was animated by Kenneth Muse, Ed Barge and Irven Spence, with backgrounds by Robert Gentle. It was released in theaters by Metro-Goldwyn-Mayer on December 18, 1954. It was the first of the Tom and Jerry shorts to be produced in the widescreen CinemaScope format, but was the second CinemaScope-produced short to be released (after Pet Peeve, released the previous month; Touché, Pussy Cat! has an earlier MPAA certificate number). The cartoon also exists in a non-Cinemascope format. Touché, Pussy Cat! spawned two further entries in the \"Mouseketeer\" series of Tom and Jerry cartoons, namely Tom and Chérie in 1955, and Royal Cat Nap in 1958."
|
31 |
+
}
|
32 |
+
```
|
33 |
+
|
34 |
+
### Citation Information
|
35 |
+
```
|
36 |
+
@inproceedings{elsahar2018t,
|
37 |
+
title={T-rex: A large scale alignment of natural language with knowledge base triples},
|
38 |
+
author={Elsahar, Hady and Vougiouklis, Pavlos and Remaci, Arslen and Gravier, Christophe and Hare, Jonathon and Laforest, Frederique and Simperl, Elena},
|
39 |
+
booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
|
40 |
+
year={2018}
|
41 |
+
}
|
42 |
+
```
|
process.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
wget https://figshare.com/ndownloader/files/8760241
|
3 |
+
unzip 8760241
|
4 |
+
"""
|
5 |
+
|
6 |
+
import json
|
7 |
+
import os
|
8 |
+
from glob import glob
|
9 |
+
from tqdm import tqdm
|
10 |
+
|
11 |
+
os.makedirs('data', exist_ok=True)
|
12 |
+
f_writer = open('data/t_rex_clean.jsonl', 'w')
|
13 |
+
for i in tqdm(glob("*.json")):
|
14 |
+
with open(i) as f:
|
15 |
+
data = json.load(f)
|
16 |
+
for _data in data:
|
17 |
+
for triple in _data['triples']:
|
18 |
+
p = triple['predicate']['surfaceform']
|
19 |
+
o = triple['object']['surfaceform']
|
20 |
+
s = triple['subject']['surfaceform']
|
21 |
+
if p is None or o is None or s is None:
|
22 |
+
continue
|
23 |
+
out = {"predicate": p, "object": o, "subject": s, "title": _data["title"], "text": _data["text"]}
|
24 |
+
f_writer.write(json.dumps(out) + "\n")
|
t_rex.py
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json

import datasets

logger = datasets.logging.get_logger(__name__)

# Dataset metadata.
_DESCRIPTION = """T-Rex dataset."""
_NAME = "t_rex"
_VERSION = "0.0.0"
_CITATION = """
@inproceedings{elsahar2018t,
title={T-rex: A large scale alignment of natural language with knowledge base triples},
author={Elsahar, Hady and Vougiouklis, Pavlos and Remaci, Arslen and Gravier, Christophe and Hare, Jonathon and Laforest, Frederique and Simperl, Elena},
booktitle={Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
year={2018}
}
"""

_HOME_PAGE = "https://github.com/asahi417/relbert"
# Data files are hosted on the Hugging Face Hub alongside this loader script.
_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
_TYPES = ["original"]
# For every dataset type, map each split name to the list of JSONL files backing it.
_URLS = {
    data_type: {
        str(datasets.Split.TRAIN): [f'{_URL}/{data_type}.train.jsonl'],
        str(datasets.Split.VALIDATION): [f'{_URL}/{data_type}.validation.jsonl'],
        str(datasets.Split.TEST): [f'{_URL}/{data_type}.test.jsonl'],
    }
    for data_type in _TYPES
}
|
26 |
+
|
27 |
+
|
28 |
+
class TREXConfig(datasets.BuilderConfig):
    """Builder configuration for the T-REx dataset."""

    def __init__(self, **kwargs):
        """Create a configuration.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
|
37 |
+
|
38 |
+
|
39 |
+
class TREX(datasets.GeneratorBasedBuilder):
    """Dataset loader for the T-REx triple/text alignment corpus."""

    # One config per entry in _TYPES (currently just "original").
    BUILDER_CONFIGS = [
        TREXConfig(name=i, version=datasets.Version(_VERSION), description=_DESCRIPTION)
        for i in sorted(_TYPES)
    ]

    def _split_generators(self, dl_manager):
        """Download the per-split JSONL files and declare one generator per split."""
        downloaded_file = dl_manager.download_and_extract(_URLS[self.config.name])
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

    def _generate_examples(self, filepaths):
        """Yield ``(key, example)`` pairs, one per non-empty JSON line.

        Streams each file line by line instead of reading it fully into
        memory (the corpus files can be large); keys are a running counter
        across all files.
        """
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        # Skip blank lines (e.g. the trailing newline at EOF).
                        continue
                    yield _key, json.loads(line)
                    _key += 1

    def _info(self):
        """Declare the schema: five flat string fields per example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "predicate": datasets.Value("string"),
                    "object": datasets.Value("string"),
                    "subject": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
|