kardosdrur
committed on
Commit
•
3314ed6
1
Parent(s):
628b198
Added source code
Browse files- src/align_corpora.py +128 -0
- src/filter_corpus.py +40 -0
- src/push_to_hub.py +24 -0
src/align_corpora.py
ADDED
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import warnings
|
3 |
+
from dataclasses import dataclass
|
4 |
+
from itertools import chain
|
5 |
+
from pathlib import Path
|
6 |
+
from typing import Iterable, Optional
|
7 |
+
|
8 |
+
from lxml import etree
|
9 |
+
from tqdm import tqdm
|
10 |
+
|
11 |
+
|
12 |
+
@dataclass
class Document:
    """One subtitle document parsed from an OpenSubtitles XML file.

    Attributes:
        id: Document identifier taken from the root element's ``id`` attribute.
        subtitles: Mapping from sentence id (``<s id=...>``) to its text content.
    """

    id: str
    subtitles: dict[str, str]

    @classmethod
    def parse_from_file(cls, path: Path) -> "Document":
        """Parse the subtitle XML file at *path* into a ``Document``.

        Raises:
            etree.XMLSyntaxError: If the file is not well-formed XML.
        """
        tree = etree.parse(str(path))
        root = tree.getroot()
        doc_id = root.get("id")  # renamed local so it doesn't shadow builtin `id`
        subtitles = {}
        for sub in root:
            # Only <s> elements carry subtitle sentences.
            if sub.tag == "s":
                # xpath("string()") concatenates all descendant text of the element.
                content = etree.ElementTree(sub).xpath("string()")
                subtitles[sub.get("id")] = content
        return cls(doc_id, subtitles)
|
28 |
+
|
29 |
+
|
30 |
+
@dataclass
class Link:
    """One alignment link between source and destination sentence ids.

    Attributes:
        link_id: The link element's ``id`` attribute.
        src: Sentence ids on the source side of the alignment.
        dest: Sentence ids on the destination side of the alignment.
        overlap: Alignment overlap score, or None when the attribute is absent.
    """

    link_id: str
    src: list[str]
    dest: list[str]
    overlap: Optional[float]

    @classmethod
    def from_attributes(cls, attributes: dict[str, str]) -> "Link":
        """Build a ``Link`` from a ``<link>`` element's attribute mapping.

        ``xtargets`` has the form ``"s1 s2;s3"`` — whitespace-separated
        sentence ids on each side of the semicolon.
        """
        link_id = attributes["id"]
        src_str, dest_str = attributes["xtargets"].split(";")
        # str.split() already returns a list; no identity comprehension needed.
        src = src_str.split()
        dest = dest_str.split()
        overlap = attributes.get("overlap")
        # XML attribute values arrive as strings; convert when present.
        if isinstance(overlap, str):
            overlap = float(overlap)
        return cls(link_id, src, dest, overlap)
|
47 |
+
|
48 |
+
|
49 |
+
@dataclass
class LinkGroup:
    """All sentence-alignment links between one source and one destination file.

    Attributes:
        src_file: Path to the source-language subtitle XML file.
        dest_file: Path to the destination-language subtitle XML file.
        links: The individual sentence alignments in this group.
    """

    src_file: Path
    dest_file: Path
    links: list[Link]

    @classmethod
    def from_element(cls, element: etree.Element, corpus_dir: Path) -> "LinkGroup":
        """Build a ``LinkGroup`` from a ``<linkGrp>`` element.

        The alignment file references gzipped documents; the corpus on disk
        is already decompressed, hence the ``.gz`` suffix removal.
        """
        src_file = corpus_dir.joinpath(
            element.attrib["fromDoc"].removesuffix(".gz")
        )
        dest_file = corpus_dir.joinpath(
            element.attrib["toDoc"].removesuffix(".gz")
        )
        links = [
            Link.from_attributes(sub.attrib)
            for sub in element
            if sub.tag == "link"
        ]
        return cls(src_file, dest_file, links)

    def generate_entries(
        self, src_lang: str, dest_lang: str
    ) -> Iterable[dict]:
        """Yield one aligned entry per link in this group.

        Each entry maps *src_lang*/*dest_lang* to the joined sentence text of
        the link, plus the link id and overlap score. If either document is
        missing or not well-formed XML, the whole group is skipped with a
        warning instead of aborting the extraction.
        """
        try:
            src_document = Document.parse_from_file(self.src_file)
            dest_document = Document.parse_from_file(self.dest_file)
        except (etree.XMLSyntaxError, OSError):
            # Name the offending pair so corrupt/missing files can be traced.
            warnings.warn(
                "Could not generate entries for link group "
                f"({self.src_file} / {self.dest_file}), corrupted files."
            )
            return
        for link in self.links:
            src_str = " ".join(
                src_document.subtitles[s_id] for s_id in link.src
            )
            dest_str = " ".join(
                dest_document.subtitles[s_id] for s_id in link.dest
            )
            yield {
                "link_id": link.link_id,
                src_lang: src_str,
                dest_lang: dest_str,
                "overlap": link.overlap,
            }
|
93 |
+
|
94 |
+
|
95 |
+
def parse_alignment(path: Path, corpus_dir: Path) -> Iterable[LinkGroup]:
    """Yield sentence-level ``LinkGroup``s from an OPUS alignment XML file.

    Only ``<linkGrp>`` elements whose ``targType`` is ``"s"`` (sentence
    alignments) are yielded; other group types are ignored.
    """
    # Pass a string for consistency with Document.parse_from_file and for
    # compatibility with lxml versions that do not accept os.PathLike.
    tree = etree.parse(str(path))
    for element in tree.getroot():
        if element.tag == "linkGrp" and element.get("targType") == "s":
            yield LinkGroup.from_element(element, corpus_dir)
|
100 |
+
|
101 |
+
|
102 |
+
def iterlen(iterable: Iterable) -> int:
    """Count the items in *iterable*, consuming it in the process."""
    return sum(1 for _ in iterable)
|
107 |
+
|
108 |
+
|
109 |
+
def main():
    """Extract all da-sv sentence alignments into ``aligned.jsonl``.

    Makes two passes over the alignment file: the first only counts link
    groups so tqdm can show a meaningful progress total, the second does
    the actual extraction.
    """
    alignment_file = Path("raw/da-sv.xml")
    corpus_dir = Path("raw/OpenSubtitles/raw/")
    groups = parse_alignment(alignment_file, corpus_dir)
    print("Counting groups...")
    n_groups = iterlen(groups)
    groups = parse_alignment(alignment_file, corpus_dir)
    groups = tqdm(groups, desc="Extracting all alignments...", total=n_groups)
    entries = chain.from_iterable(
        group.generate_entries("da", "sv") for group in groups
    )
    out_path = "aligned.jsonl"
    # Explicit encoding: the default is locale-dependent and could corrupt
    # the corpus on platforms where it is not UTF-8.
    with open(out_path, "w", encoding="utf-8") as out_file:
        for entry in entries:
            out_file.write(json.dumps(entry) + "\n")
    print("Done")
|
125 |
+
|
126 |
+
|
127 |
+
# Script entry point: run the extraction when executed directly.
if __name__ == "__main__":
    main()
|
src/filter_corpus.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import string
|
3 |
+
from typing import Iterable
|
4 |
+
|
5 |
+
|
6 |
+
def tokenize(text: str) -> tuple[str, ...]:
    """Lowercase *text*, strip ASCII punctuation, and split it into tokens.

    Returns a tuple (annotated ``tuple[str, ...]`` — variable length, not a
    1-tuple) so results are hashable and directly comparable.
    """
    text = text.lower().strip()
    # str.translate does one C-level pass removing every punctuation character.
    text = text.translate(str.maketrans("", "", string.punctuation))
    return tuple(text.split())
|
10 |
+
|
11 |
+
|
12 |
+
def entry_filter_pass(entry: dict) -> bool:
    """Decide whether an aligned entry should be kept.

    Rejects entries with a missing or low (< 0.5) overlap score, pairs whose
    token sequences are identical, and pairs where either side has fewer
    than 6 tokens.
    """
    overlap = entry["overlap"]
    if overlap is None or overlap < 0.5:
        return False
    da_tokens = tokenize(entry["da"])
    sv_tokens = tokenize(entry["sv"])
    return (
        da_tokens != sv_tokens
        and len(da_tokens) >= 6
        and len(sv_tokens) >= 6
    )
|
22 |
+
|
23 |
+
|
24 |
+
def generate_from_jsonl(path: str) -> Iterable[dict]:
    """Lazily yield one parsed JSON object per line of the file at *path*.

    The file stays open only while the generator is being consumed.
    """
    # Explicit encoding: the default is locale-dependent; the corpus may
    # contain non-ASCII Danish/Swedish text.
    with open(path, encoding="utf-8") as in_file:
        for line in in_file:
            # json.loads already ignores surrounding whitespace/newlines.
            yield json.loads(line)
|
29 |
+
|
30 |
+
|
31 |
+
def main():
    """Filter ``aligned.jsonl`` and write survivors to ``aligned_filtered.jsonl``."""
    entries = generate_from_jsonl("aligned.jsonl")
    entries = filter(entry_filter_pass, entries)
    # Explicit encoding: the default is locale-dependent and could corrupt
    # the output on platforms where it is not UTF-8.
    with open("aligned_filtered.jsonl", "w", encoding="utf-8") as out_file:
        for entry in entries:
            out_file.write(json.dumps(entry) + "\n")
|
37 |
+
|
38 |
+
|
39 |
+
# Script entry point: run the filtering when executed directly.
if __name__ == "__main__":
    main()
|
src/push_to_hub.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
from functools import partial
|
3 |
+
from typing import Iterable
|
4 |
+
|
5 |
+
from datasets import Dataset
|
6 |
+
|
7 |
+
|
8 |
+
def generate_from_jsonl(path: str) -> Iterable[dict]:
    """Lazily yield one parsed JSON object per line of the file at *path*.

    The file stays open only while the generator is being consumed.
    """
    # Explicit encoding: the default is locale-dependent; the corpus may
    # contain non-ASCII Danish/Swedish text.
    with open(path, encoding="utf-8") as in_file:
        for line in in_file:
            # json.loads already ignores surrounding whitespace/newlines.
            yield json.loads(line)
|
13 |
+
|
14 |
+
|
15 |
+
def main():
    """Load the filtered corpus, split it, and push it to the Hugging Face Hub."""
    # Dataset.from_generator expects a zero-argument callable; partial binds the path.
    dataset = Dataset.from_generator(
        partial(generate_from_jsonl, "aligned_filtered.jsonl")
    )
    # A fixed seed makes the published train/test split reproducible;
    # without it every rerun would publish a different split.
    dataset = dataset.train_test_split(test_size=0.2, shuffle=True, seed=42)
    dataset.push_to_hub("kardosdrur/opensubtitles-da-sv")
|
21 |
+
|
22 |
+
|
23 |
+
# Script entry point: push the dataset when executed directly.
if __name__ == "__main__":
    main()
|