hpprc committed
Commit 456b2a6
1 parent: 6d0b64d
Files changed (6)
  1. .python-version +1 -0
  2. README.md +22 -0
  3. en-ja-align.py +175 -0
  4. pyproject.toml +27 -0
  5. requirements-dev.lock +92 -0
  6. requirements.lock +92 -0
.python-version ADDED
@@ -0,0 +1 @@
+ 3.10.13
README.md ADDED
@@ -0,0 +1,22 @@
+ # en-ja-align
+
+ Parallel passages from novels, built from Utiyama and Takahashi's English-Japanese Translation Alignment Data.
+
+ ## Preprocess
+
+ The loading script (`en-ja-align.py`) downloads the original distribution, parses each HTML alignment table, strips editorial annotations and ruby readings, and merges adjacent rows until sentence-final punctuation, parentheses, and quotation marks are balanced on both the English and Japanese sides.
+
+ ## License
+
+ > The copyright of each original work remains subject to that work's own copyright terms.
+ > Everything else is provided under Creative Commons Attribution-ShareAlike 3.0 Unported, with copyright held by the Multilingual Translation Laboratory of NICT (National Institute of Information and Communications Technology).
+ > Where an original work prohibits redistribution, derivatives of it (such as this parallel-text data) may not be redistributed either.
+ > Likewise, where an original work prohibits commercial use, commercial use of derivatives is also prohibited.
+
+ ## Citation
+
+ ```
+ 内山将夫,高橋真弓.(2003) 日英対訳文対応付けデータ.
+ Masao Utiyama and Mayumi Takahashi. (2003) English-Japanese Translation Alignment Data.
+ ```
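+
+ ## Usage
+
+ A minimal loading sketch; the Hub dataset ID `hpprc/en-ja-align` below is an assumption and may differ:
+
+ ```python
+ import datasets as ds
+
+ # Load from the local loading script in this repository...
+ dataset = ds.load_dataset("en-ja-align.py", split="train")
+ # ...or, if the repository is hosted on the Hub, by its ID (assumed):
+ # dataset = ds.load_dataset("hpprc/en-ja-align", split="train")
+
+ print(dataset[0])  # {"id": 0, "en": "...", "ja": "...", "source": "....htm"}
+ ```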
en-ja-align.py ADDED
@@ -0,0 +1,175 @@
+ import re
+ import unicodedata
+ from pathlib import Path
+ from typing import List
+
+ from bs4 import BeautifulSoup
+
+ import datasets as ds
+
+
+ _DESCRIPTION = "Parallel passages from novels."
+
+ _CITATION = """
+ 内山将夫,高橋真弓.(2003) 日英対訳文対応付けデータ.
+ Masao Utiyama and Mayumi Takahashi. (2003) English-Japanese Translation Alignment Data.
+ """.strip()
+
+ _HOMEPAGE = "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/"
+
+ _LICENSE = None
+
+ _DOWNLOAD_URL = (
+     "https://www2.nict.go.jp/astrec-att/member/mutiyama/align/download/align-070215.zip"
+ )
+
+
+ def preprocess(text: str) -> str:
+     text = re.sub(r"<注[0-9]+>", "", text.strip())
+     # Drop bracketed editorial annotations such as [#...].
+     text = re.sub(r"\[#.*?\]", "", text)
+     # Drop parenthesized hiragana runs (ruby readings).
+     text = re.sub(r"\(([\u3040-\u309F]+)\)", "", text)
+     text = re.sub(r" − (.+) − ", "――\\1――", text)
+     text = re.sub(r"_(.+)_", "\\1", text)
+     return text.strip()
+
+
+ def parse_html_table(path: Path):
+     # The source pages are mostly Shift_JIS; fall back to other encodings.
+     for encoding in ("shift_jis", "utf-8", "cp932"):
+         try:
+             content = path.read_text(encoding=encoding)
+             break
+         except UnicodeDecodeError:
+             continue
+     else:
+         return [], []
+
+     soup = BeautifulSoup(content, "lxml")
+     tables = soup.find_all("table")
+
+     texts_en, texts_ja = [], []
+     cur_text_en, cur_text_ja = "", ""
+
+     # Balance counters: ()/`` '' on the English side, 「」/『』 on the Japanese side.
+     cur_left_parens, cur_right_parens = 0, 0
+     cur_left_quote, cur_right_quote = 0, 0
+     cur_left_parens_ja, cur_right_parens_ja = 0, 0
+     cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0
+
+     for table in tables:
+         for tr in table.find_all("tr"):
+             text_en, _, text_ja = (preprocess(td.text) for td in tr.find_all("td"))
+             text_ja = unicodedata.normalize("NFKC", text_ja)
+
+             cur_left_parens += text_en.count("(")
+             cur_right_parens += text_en.count(")")
+             cur_left_quote += text_en.count("``")
+             cur_right_quote += text_en.count("''")
+
+             cur_left_parens_ja += text_ja.count("「")
+             cur_right_parens_ja += text_ja.count("」")
+             cur_left_parens_ja2 += text_ja.count("『")
+             cur_right_parens_ja2 += text_ja.count("』")
+
+             # Flush the accumulated passage only when both sides end a sentence
+             # and every bracket/quote pair is balanced.
+             if (
+                 text_ja.strip().endswith("。")
+                 and text_en.strip().endswith(".")
+                 and cur_left_parens == cur_right_parens
+                 and cur_left_quote == cur_right_quote
+                 and cur_left_parens_ja == cur_right_parens_ja
+                 and cur_left_parens_ja2 == cur_right_parens_ja2
+             ):
+                 texts_en.append((cur_text_en + " " + text_en).strip())
+                 texts_ja.append((cur_text_ja + text_ja).strip())
+                 cur_text_en, cur_text_ja = "", ""
+                 cur_left_parens, cur_right_parens = 0, 0
+                 cur_left_quote, cur_right_quote = 0, 0
+                 cur_left_parens_ja, cur_right_parens_ja = 0, 0
+                 cur_left_parens_ja2, cur_right_parens_ja2 = 0, 0
+             else:
+                 cur_text_en += " " + text_en
+                 cur_text_ja += text_ja
+
+     # Flush any trailing, still-unbalanced passage.
+     texts_en.append(cur_text_en.strip())
+     texts_ja.append(cur_text_ja.strip())
+
+     return texts_en, texts_ja
+
+
+ class EnJaAlignDataset(ds.GeneratorBasedBuilder):
+     VERSION = ds.Version("1.0.0")
+     DEFAULT_CONFIG_NAME = "default"
+
+     BUILDER_CONFIGS = [
+         ds.BuilderConfig(
+             name="default",
+             version=VERSION,
+             description="",
+         ),
+     ]
+
+     def _info(self) -> ds.DatasetInfo:
+         if self.config.name == "default":
+             features = ds.Features(
+                 {
+                     "id": ds.Value("int64"),
+                     "en": ds.Value("string"),
+                     "ja": ds.Value("string"),
+                     "source": ds.Value("string"),
+                 }
+             )
+
+         return ds.DatasetInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             features=features,
+         )
+
+     def _split_generators(self, dl_manager: ds.DownloadManager):
+         data_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
+         paths = list(Path(data_path, "align/htmPages").glob("*.htm"))
+
+         return [
+             ds.SplitGenerator(
+                 name=ds.Split.TRAIN,
+                 gen_kwargs={"paths": paths},
+             )
+         ]
+
+     def _preprocess_ja(self, text: str) -> str:
+         # Drop leading section numbers such as "1.2." and a leading dash.
+         text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
+         text = re.sub(r"^――", "", text).strip()
+         return text
+
+     def _preprocess_en(self, text: str) -> str:
+         text = re.sub(r"\d+\.(\d|\.)*", "", text.strip()).strip()
+         # Normalize a stray third backtick, keeping the quoted text intact.
+         text = re.sub(r"```(.*?)'", r"``\1'", text).strip()
+         # Convert ``...'' quoting to plain double quotes.
+         text = re.sub(r"``(.*?)''", r'"\1"', text).strip()
+         return text
+
+     def _generate_examples(self, paths: List[Path]):
+         idx = 0
+
+         for path in paths:
+             texts_en, texts_ja = parse_html_table(path)
+             # strict=True (Python 3.10+) guards against mismatched en/ja lengths.
+             for text_en, text_ja in zip(texts_en, texts_ja, strict=True):
+                 row = {
+                     "id": idx,
+                     "en": self._preprocess_en(text_en),
+                     "ja": self._preprocess_ja(text_ja),
+                     "source": path.name,
+                 }
+
+                 if (
+                     isinstance(row["en"], str)
+                     and isinstance(row["ja"], str)
+                     and len(row["en"]) > 0
+                     and len(row["ja"]) > 0
+                 ):
+                     yield idx, row
+                     idx += 1
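
A quick way to sanity-check the row-merging logic above. This is a minimal sketch: it assumes `parse_html_table` and the script's imports are already in scope (for example, pasted into a REPL), since the hyphenated file name is not directly importable.

```python
import tempfile
from pathlib import Path

# Two rows whose sentence only completes on the second row; the middle cell
# mimics the alignment-score column in the real htmPages tables.
toy = """
<table>
<tr><td>He said, ``Hello</td><td>1</td><td>彼は「こんにちは</td></tr>
<tr><td>world.''</td><td>1</td><td>世界」と言った。</td></tr>
</table>
"""

with tempfile.NamedTemporaryFile(
    "w", suffix=".htm", encoding="shift_jis", delete=False
) as f:
    f.write(toy)

# The two rows are merged into a single aligned passage.
texts_en, texts_ja = parse_html_table(Path(f.name))
print(texts_en[0])  # He said, ``Hello world.''
print(texts_ja[0])  # 彼は「こんにちは世界」と言った。
```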
pyproject.toml ADDED
@@ -0,0 +1,27 @@
+ [project]
+ name = "en-ja-align"
+ version = "0.1.0"
+ description = "Parallel passages from novels (English-Japanese translation alignment data)."
+ authors = [{ name = "hppRC", email = "[email protected]" }]
+ dependencies = [
+     "datasets>=2.18.0",
+     "beautifulsoup4>=4.12.3",
+     "tqdm>=4.66.2",
+     "lxml>=5.1.0",
+ ]
+ readme = "README.md"
+ requires-python = ">= 3.10"
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.rye]
+ managed = true
+ dev-dependencies = []
+
+ [tool.hatch.metadata]
+ allow-direct-references = true
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["en-ja-align"]
requirements-dev.lock ADDED
@@ -0,0 +1,92 @@
+ # generated by rye
+ # use `rye lock` or `rye sync` to update this lockfile
+ #
+ # last locked with the following flags:
+ #   pre: false
+ #   features: []
+ #   all-features: false
+ #   with-sources: false
+
+ -e file:.
+ aiohttp==3.9.3
+     # via datasets
+     # via fsspec
+ aiosignal==1.3.1
+     # via aiohttp
+ async-timeout==4.0.3
+     # via aiohttp
+ attrs==23.2.0
+     # via aiohttp
+ beautifulsoup4==4.12.3
+     # via en-ja-align
+ certifi==2024.2.2
+     # via requests
+ charset-normalizer==3.3.2
+     # via requests
+ datasets==2.18.0
+     # via en-ja-align
+ dill==0.3.8
+     # via datasets
+     # via multiprocess
+ filelock==3.13.1
+     # via datasets
+     # via huggingface-hub
+ frozenlist==1.4.1
+     # via aiohttp
+     # via aiosignal
+ fsspec==2024.2.0
+     # via datasets
+     # via huggingface-hub
+ huggingface-hub==0.21.4
+     # via datasets
+ idna==3.6
+     # via requests
+     # via yarl
+ lxml==5.1.0
+     # via en-ja-align
+ multidict==6.0.5
+     # via aiohttp
+     # via yarl
+ multiprocess==0.70.16
+     # via datasets
+ numpy==1.26.4
+     # via datasets
+     # via pandas
+     # via pyarrow
+ packaging==24.0
+     # via datasets
+     # via huggingface-hub
+ pandas==2.2.1
+     # via datasets
+ pyarrow==15.0.2
+     # via datasets
+ pyarrow-hotfix==0.6
+     # via datasets
+ python-dateutil==2.9.0.post0
+     # via pandas
+ pytz==2024.1
+     # via pandas
+ pyyaml==6.0.1
+     # via datasets
+     # via huggingface-hub
+ requests==2.31.0
+     # via datasets
+     # via huggingface-hub
+ six==1.16.0
+     # via python-dateutil
+ soupsieve==2.5
+     # via beautifulsoup4
+ tqdm==4.66.2
+     # via datasets
+     # via en-ja-align
+     # via huggingface-hub
+ typing-extensions==4.10.0
+     # via huggingface-hub
+ tzdata==2024.1
+     # via pandas
+ urllib3==2.2.1
+     # via requests
+ xxhash==3.4.1
+     # via datasets
+ yarl==1.9.4
+     # via aiohttp
requirements.lock ADDED
@@ -0,0 +1,92 @@
+ # generated by rye
+ # use `rye lock` or `rye sync` to update this lockfile
+ #
+ # last locked with the following flags:
+ #   pre: false
+ #   features: []
+ #   all-features: false
+ #   with-sources: false

+ -e file:.
+ aiohttp==3.9.3
+     # via datasets
+     # via fsspec
+ aiosignal==1.3.1
+     # via aiohttp
+ async-timeout==4.0.3
+     # via aiohttp
+ attrs==23.2.0
+     # via aiohttp
+ beautifulsoup4==4.12.3
+     # via en-ja-align
+ certifi==2024.2.2
+     # via requests
+ charset-normalizer==3.3.2
+     # via requests
+ datasets==2.18.0
+     # via en-ja-align
+ dill==0.3.8
+     # via datasets
+     # via multiprocess
+ filelock==3.13.1
+     # via datasets
+     # via huggingface-hub
+ frozenlist==1.4.1
+     # via aiohttp
+     # via aiosignal
+ fsspec==2024.2.0
+     # via datasets
+     # via huggingface-hub
+ huggingface-hub==0.21.4
+     # via datasets
+ idna==3.6
+     # via requests
+     # via yarl
+ lxml==5.1.0
+     # via en-ja-align
+ multidict==6.0.5
+     # via aiohttp
+     # via yarl
+ multiprocess==0.70.16
+     # via datasets
+ numpy==1.26.4
+     # via datasets
+     # via pandas
+     # via pyarrow
+ packaging==24.0
+     # via datasets
+     # via huggingface-hub
+ pandas==2.2.1
+     # via datasets
+ pyarrow==15.0.2
+     # via datasets
+ pyarrow-hotfix==0.6
+     # via datasets
+ python-dateutil==2.9.0.post0
+     # via pandas
+ pytz==2024.1
+     # via pandas
+ pyyaml==6.0.1
+     # via datasets
+     # via huggingface-hub
+ requests==2.31.0
+     # via datasets
+     # via huggingface-hub
+ six==1.16.0
+     # via python-dateutil
+ soupsieve==2.5
+     # via beautifulsoup4
+ tqdm==4.66.2
+     # via datasets
+     # via en-ja-align
+     # via huggingface-hub
+ typing-extensions==4.10.0
+     # via huggingface-hub
+ tzdata==2024.1
+     # via pandas
+ urllib3==2.2.1
+     # via requests
+ xxhash==3.4.1
+     # via datasets
+ yarl==1.9.4
+     # via aiohttp