|
import json |
|
import warnings |
|
from dataclasses import dataclass |
|
from itertools import chain |
|
from pathlib import Path |
|
from typing import Iterable, Optional |
|
|
|
from lxml import etree |
|
from tqdm import tqdm |
|
|
|
|
|
@dataclass
class Document:
    """A parsed OpenSubtitles document: its id plus all sentence texts."""

    # Document id, taken from the root element's "id" attribute.
    id: str
    # Maps sentence id (the <s> element's "id" attribute) -> full sentence text.
    subtitles: dict[str, str]

    @classmethod
    def parse_from_file(cls, path: Path) -> "Document":
        """Parse an OpenSubtitles XML file at *path* into a Document.

        Raises:
            etree.XMLSyntaxError: if the file is not well-formed XML.
        """
        root = etree.parse(str(path)).getroot()
        # XPath "string()" concatenates all text nodes inside the <s> element.
        subtitles = {
            sub.get("id"): etree.ElementTree(sub).xpath("string()")
            for sub in root
            if sub.tag == "s"
        }
        # Passed positionally so the local never shadows the id() builtin.
        return cls(root.get("id"), subtitles)
|
|
|
|
|
@dataclass
class Link:
    """One alignment link between sentences of a source and a target file."""

    link_id: str
    # Sentence ids in the source document.
    src: list[str]
    # Sentence ids in the target document.
    dest: list[str]
    # Time-overlap score from the "overlap" attribute; None when absent.
    overlap: Optional[float]

    @classmethod
    def from_attributes(cls, attributes: dict[str, str]) -> "Link":
        """Build a Link from a <link> element's attribute mapping.

        The "xtargets" attribute has the form "s1 s2;t1": space-separated
        source ids, a semicolon, then space-separated target ids.
        """
        src_str, dest_str = attributes["xtargets"].split(";")
        overlap = attributes.get("overlap")
        return cls(
            attributes["id"],
            # str.split() already returns a fresh list; no copy needed.
            src_str.split(),
            dest_str.split(),
            float(overlap) if overlap is not None else None,
        )
|
|
|
|
|
@dataclass
class LinkGroup:
    """All sentence-alignment links between one source and one target file."""

    src_file: Path
    dest_file: Path
    links: list[Link]

    @classmethod
    def from_element(cls, element: etree.Element, corpus_dir: Path) -> "LinkGroup":
        """Build a LinkGroup from a <linkGrp> element.

        The fromDoc/toDoc attributes name ".gz" files; the corpus on disk is
        assumed to hold the uncompressed versions, hence the suffix strip.
        """
        src_file = corpus_dir / element.attrib["fromDoc"].removesuffix(".gz")
        dest_file = corpus_dir / element.attrib["toDoc"].removesuffix(".gz")
        links = [
            Link.from_attributes(sub.attrib)
            for sub in element
            if sub.tag == "link"
        ]
        return cls(src_file, dest_file, links)

    def generate_entries(
        self, src_lang: str, dest_lang: str
    ) -> Iterable[dict]:
        """Yield one dict per link, joining the linked sentences' text.

        Each dict has keys "link_id", *src_lang*, *dest_lang*, and "overlap".
        Yields nothing (after emitting a warning) when either document fails
        to parse.
        """
        try:
            src_document = Document.parse_from_file(self.src_file)
            dest_document = Document.parse_from_file(self.dest_file)
        except etree.XMLSyntaxError:
            warnings.warn(
                "Could not generate entries for link group, corrupted files."
            )
            # Bare return ends the generator cleanly; the previous
            # `return iter([])` built an iterator nobody could ever consume.
            return
        for link in self.links:
            # NOTE(review): raises KeyError if a linked sentence id is missing
            # from the parsed document -- confirm ids are always present.
            src_str = " ".join(src_document.subtitles[s_id] for s_id in link.src)
            dest_str = " ".join(
                dest_document.subtitles[s_id] for s_id in link.dest
            )
            yield {
                "link_id": link.link_id,
                src_lang: src_str,
                dest_lang: dest_str,
                "overlap": link.overlap,
            }
|
|
|
|
|
def parse_alignment(path: Path, corpus_dir: Path) -> Iterable[LinkGroup]:
    """Yield one LinkGroup per sentence-level <linkGrp> in the alignment file.

    Args:
        path: path to the OPUS alignment XML file (e.g. ``da-sv.xml``).
        corpus_dir: root directory of the unpacked corpus documents.
    """
    # str(path) for consistency with Document.parse_from_file; older lxml
    # releases do not accept os.PathLike objects directly.
    tree = etree.parse(str(path))
    for element in tree.getroot():
        # Only sentence-level groups (targType="s") are relevant here.
        if element.tag == "linkGrp" and element.get("targType") == "s":
            yield LinkGroup.from_element(element, corpus_dir)
|
|
|
|
|
def iterlen(iterable: Iterable) -> int:
    """Return the number of items in *iterable*, consuming it.

    A len() substitute for iterables that do not support len().
    """
    # sum() over a generator of 1s counts items without a manual loop.
    return sum(1 for _ in iterable)
|
|
|
|
|
def main():
    """Extract da-sv aligned subtitle pairs and write them to aligned.jsonl."""
    alignment_file = Path("raw/da-sv.xml")
    corpus_dir = Path("raw/OpenSubtitles/raw/")
    # First pass only counts the link groups so tqdm can show a total.
    print("Counting groups...")
    n_groups = iterlen(parse_alignment(alignment_file, corpus_dir))
    # Second pass performs the actual extraction.
    groups = tqdm(
        parse_alignment(alignment_file, corpus_dir),
        desc="Extracting all alignments...",
        total=n_groups,
    )
    entries = chain.from_iterable(
        group.generate_entries("da", "sv") for group in groups
    )
    out_path = Path("aligned.jsonl")
    # Explicit UTF-8 keeps the output platform-independent; ensure_ascii=False
    # writes the Danish/Swedish characters directly instead of \uXXXX escapes.
    with out_path.open("w", encoding="utf-8") as out_file:
        for entry in entries:
            out_file.write(json.dumps(entry, ensure_ascii=False) + "\n")
    print("Done")
|
|
|
|
|
# Entry point: run the extraction only when executed as a script, not on import.
if __name__ == "__main__":

    main()
|
|