nenekochan committed
Commit df03d35
Parent(s): 7d2b3b6

initial processing pipeline

Files changed:
- .gitignore +5 -0
- README.md +49 -0
- changelog.md +5 -0
- ks-parse-all.py +36 -0
- ks_parse/__main__.py +32 -0
- ks_parse/chapter.py +29 -0
- ks_parse/chapter_adv.py +51 -0
- ks_parse/chapter_novel.py +42 -0
- ks_parse/ks.py +36 -0
- manual_seg-progress.md +26 -0
- script/dos2unix.sh +8 -0
- script/transcode.sh +10 -0
- segment/__init__.py +0 -0
- segment/auto/__main__.py +100 -0
- segment/auto/classifier.py +77 -0
- segment/auto/similarity.py +34 -0
- segment/manual/__main__.py +111 -0
- segment/utils.py +48 -0
.gitignore ADDED
@@ -0,0 +1,5 @@
+__pycache__/
+/.vscode/
+
+/conversation/
+/scenario*/
README.md ADDED
@@ -0,0 +1,49 @@
+---
+pretty_name: 夜羊L系列简中脚本
+language:
+- zh
+language_details: zho_Hans
+license: cc-by-nc-4.0
+annotations_creators:
+- expert-generated
+- machine-generated
+task_categories:
+- conversational
+- text-generation
+---
+
+> Nights when you cannot sleep, and nights when you do not want to sleep
+
+## ⚠️ Warning
+
+- **Please note that this data comes from R18 visual novels and contains themes that may be considered inappropriate, shocking, disturbing, offensive, and extreme. If you are unsure about the legal consequences of possessing fictional textual content of any kind in your country, do not download it.**
+- **All data in this project, and any derivative works based on it, must not be used for commercial purposes.** I do not own the krkr2 script source files in `scenario-raw`; the rest of the data-processing work is released under CC BY-NC 4.0.
+- In preprocessing order: `scenario-raw` holds the raw krkr2 script files, `scenario` holds the cleaned structured scripts, and `conversation` holds the conversation-format data produced by my subjective segmentation.
+- Part of the subjective segmentation is manual; the rest is a not-entirely-reliable automatic segmentation based on text similarity (used for the titles I have not played yet, because I do not want to be spoiled!). Manual segmentation is a long and obstructed road, so I will take it slowly; progress is tracked in [manual_seg-progress.md](manual_seg-progress.md).
+- The first four titles (2015-2017) have a single heroine; the later titles all have two heroines, and their script format differs slightly.
+- 🔑 The archives are encrypted; the extraction password is yorunohitsuji
+
+## Thanks to the translation groups behind the data sources
+
+- 与小萝莉相思相爱:脸肿汉化组
+- 勾指婚约洛丽塔:守夜人汉化组&7/9工作室
+- 与小萝莉相思相爱的生活:脸肿汉化组
+- 同居恋人洛丽塔:守夜人汉化组
+- 双子洛丽塔后宫:靴下汉化组x仓库汉化组
+- 爱欲姐妹洛丽塔:守夜人汉化组
+- 诱惑自大洛丽塔:守夜人汉化组
+- 每日亲吻洛丽塔:比喵个人汉化
+
+## Preprocessing pipeline (notes to self)
+
+0. Extract each title's scripts into `scenario-raw/` and convert them to UTF-8 with `script/transcode.sh`; `2015-sssa` additionally needs `script/dos2unix.sh` to normalize line endings to LF
+1. Fix minor formatting issues: `cd scenario-raw && bash patch.sh`
+2. Run `python ks-parse-all.py` to produce `scenario/`
+3. Segment, then convert into `conversation/` (a reading sketch follows this file):
+    a. automatic segmentation: `python -m segment.auto path/to/scenario.jsonl`
+    b. after manual segmentation: `python -m segment.manual path/to/scenario-manual_seg.jsonl`
+
+Adding a new volume:
+
+0. Put its scripts in `scenario-raw/`
+1. Add the new volume's metadata in `ks-parse-all.py`
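For reference, a minimal sketch of reading the final `conversation/` output in Python (the filename is illustrative; the record shape is the one documented in the `segment` module docstrings below):

```python
import json
from pathlib import Path

# Illustrative path; every file under conversation/ has the same shape.
path = Path("conversation/2018-harem/A000-00.json")
data = json.loads(path.read_text(encoding="utf-8"))

print(data["title"])
for pair in data["conversation"]:
    # One round: request lines (protagonist and/or narration),
    # then response lines (heroine and/or narration).
    for entry in pair["request"]:
        print(">", entry["speaker"], entry["text"])
    for entry in pair["response"]:
        print("<", entry["speaker"], entry["text"])
```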
changelog.md ADDED
@@ -0,0 +1,5 @@
+
+### 2024-02-01 - How goes the night?
+
+- Released the data-processing code.
+- First version of the data. Manual segmentation of the first four titles is a bit under half done; for the last four it has only just begun.
ks-parse-all.py ADDED
@@ -0,0 +1,36 @@
+from ks_parse.__main__ import parse_file
+
+from attrs import define, Factory
+
+from pathlib import Path
+
+
+@define
+class Volume:
+    name: str
+    stype: str
+    # ロリ妊娠はダメ、下江コハル.jpg
+    ignore_list: list[str] = Factory(list)
+
+
+ALL_VOLUMES = [
+    Volume(name="2015-sssa", stype="novel"),
+    Volume(name="2016a-yubikiri", stype="novel", ignore_list=["C000"]),
+    Volume(name="2016b-sssa2", stype="novel"),
+    Volume(name="2017-otomari", stype="novel", ignore_list=["C000"]),
+    Volume(name="2018-harem", stype="adv", ignore_list=["Z000"]),
+    Volume(name="2019-aiyoku", stype="adv", ignore_list=["B003_2", "Z000"]),
+    Volume(name="2020-yuuwaku", stype="adv", ignore_list=["B000"]),
+    Volume(name="2022-mainichi", stype="adv", ignore_list=["Z000"]),
+]
+BASE = Path("./scenario-raw")
+
+if __name__ == "__main__":
+    for vol in ALL_VOLUMES:
+        print(f"Processing {vol.name}...", end="")
+        for ks_path in sorted((BASE / vol.name).glob("*.ks")):
+            if ks_path.stem in vol.ignore_list:
+                continue
+            print(f" {ks_path.stem}", end="")
+            parse_file(ks_path, vol.stype)
+        print()
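Per the "Adding a new volume" note in the README, registering a new title is one more entry in `ALL_VOLUMES`; a hypothetical example (the volume name and ignored stem are placeholders, not a real release):

```python
# Hypothetical entry to append inside ALL_VOLUMES in ks-parse-all.py:
# a future volume in the newer two-heroine "adv" format, skipping a
# placeholder chapter the way the other adv volumes skip theirs.
Volume(name="2024-newtitle", stype="adv", ignore_list=["Z000"]),
```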
ks_parse/__main__.py ADDED
@@ -0,0 +1,32 @@
+from .ks import parse
+
+from attrs import asdict
+
+from sys import argv
+from pathlib import Path
+import json
+
+
+def parse_file(ks_path: Path, stype: str):
+    ks_name = f"{ks_path.parent.name}/{ks_path.stem}"
+
+    output_base = Path("./scenario")
+    output_base.mkdir(exist_ok=True)
+    (output_base / ks_path.parent.name).mkdir(exist_ok=True)
+
+    with ks_path.open("r") as fi:
+        chapters = parse(fi, stype)
+        for i, ch in enumerate(chapters):
+            with (output_base / f"{ks_name}-{i:02d}.jsonl").open("w") as fo:
+                json.dump({"title": ch.title}, fo, ensure_ascii=False)
+                print(file=fo)
+                for utt in ch.content:
+                    json.dump(asdict(utt), fo, ensure_ascii=False)
+                    print(file=fo)
+
+
+if __name__ == "__main__":
+    if len(argv) != 3:
+        print(f"Usage: python -m {__package__} path/to/script.ks novel|adv")
+        exit(1)
+    parse_file(Path(argv[1]), argv[2])
ks_parse/chapter.py ADDED
@@ -0,0 +1,29 @@
+from attrs import define
+
+
+import re
+from typing import Iterable, Callable
+
+TAG = re.compile(r"\[(.+?)\]")
+# I hope there are no nested tags in the script...
+
+
+@define
+class Utterance:
+    speaker: str
+    text: str
+
+
+@define
+class Chapter:
+    title: str
+    content: Iterable[Utterance]
+
+
+type ChapterParser = Callable[[Iterable[str]], Iterable[Utterance]]
+
+
+def clean_tag(line: str) -> str:
+    line = TAG.sub("", line.strip())
+    line = line.replace("　", "")  # Remove full-width spaces
+    return line
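For reference, what `clean_tag` does to one raw line (the line is fabricated, but `[r]`/`[p]` are the Kirikiri2 tags the chapter parsers below deal with):

```python
from ks_parse.chapter import clean_tag

# Bracket tags are stripped, surrounding whitespace trimmed,
# and full-width spaces removed.
print(clean_tag("　彼女は笑った。[r]そして言った。[p]\n"))
# 彼女は笑った。そして言った。
```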
ks_parse/chapter_adv.py ADDED
@@ -0,0 +1,51 @@
+from .chapter import Utterance, clean_tag
+
+import re
+from typing import Iterable
+
+TAG_SPEAKER = re.compile(r"\[speaker name=\"(.+)\"\]")
+
+
+def parse(lines: Iterable[str]) -> Iterable[Utterance]:
+    """Parse Kirikiri2 script chapters in the newer format,
+    which is distinguished by the presence of a `[startadv]` tag.
+
+    Most `[r]` tags are implicit.
+    Speaker names are now annotated with `[speaker]` tags.
+    There can be multiple heroines. (Well, for Yoru no Hitsuji, there are two.)
+    Dialogue lines start with left quotation marks; otherwise they are narration.
+    Pages only occupy the lower quarter of the screen, and page breaks usually come per sentence.
+    Whiteout/blackout scene changes are taken as significant divisors.
+    """
+
+    speaker = None
+    scene_change = False
+    for line in lines:
+        if line.startswith(";"):
+            continue
+        if line.startswith("[speaker"):
+            match = TAG_SPEAKER.search(line)
+            if match is None:
+                raise ValueError(f"Invalid speaker tag: {line}")
+            speaker = match.group(1)
+            continue
+        scene_change |= "[msgv]" in line
+
+        line = clean_tag(line)
+        if not line:
+            continue
+        if speaker is not None and not line.startswith("「"):
+            # raise ValueError(f"Speaker tag not followed by a line: {line}")
+            # 2022-mainichi does not keep this rule consistently
+            line = "「" + line
+            if not line.endswith("」"):
+                line += "」"
+
+        if speaker is not None:
+            yield Utterance(speaker=speaker, text=line)
+            speaker = None
+        else:
+            yield Utterance(speaker="narrator", text=line)
+        if scene_change:
+            yield Utterance(speaker="(scene change)", text="")
+            scene_change = False
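A small sketch of the adv parser on a fabricated chapter body (the lines imitate the tag conventions described in the docstring; they are not from any real script):

```python
from ks_parse.chapter_adv import parse

lines = [
    "; comment line, skipped",
    "彼女は振り返った。[p]",      # narration
    '[speaker name="ヒロイン"]',  # names the speaker of the next line
    "「おはよう」[p]",
    "[msgv]",                     # whiteout: flags a scene change
    "場面が変わる。[p]",
]
for utt in parse(lines):
    print(utt.speaker, utt.text)
# narrator 彼女は振り返った。
# ヒロイン 「おはよう」
# narrator 場面が変わる。
# (scene change)    <- empty text, emitted after the line that follows the flag
```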
ks_parse/chapter_novel.py ADDED
@@ -0,0 +1,42 @@
+from .chapter import Utterance, clean_tag
+
+from typing import Iterable
+
+
+def parse(lines: Iterable[str]) -> Iterable[Utterance]:
+    """Parse Kirikiri2 script chapters in the older format,
+    which is distinguished by the presence of a `[startnovel]` tag.
+
+    Formatting is done almost exclusively with `[r]`, `[l]`, and `[p]` tags.
+    There is only one heroine and one male protagonist,
+    and the heroine's lines can be recognized by the `[voice]` tag on the preceding line.
+    Dialogue lines start with left quotation marks; narration starts with spaces.
+    Whiteout/blackout scene changes are significant divisors.
+    """
+
+    heroine_line = False
+    scene_change = False
+    for line in lines:
+        if line.startswith(";"):
+            continue
+        if "[voice" in line:
+            heroine_line = True
+            continue
+        scene_change |= "[msgv]" in line
+
+        line = clean_tag(line)
+        if not line:
+            continue
+        if heroine_line and not line.startswith("「"):
+            raise ValueError(f"Voice tag not followed by heroine line: {line}")
+
+        if line.startswith("「"):
+            yield Utterance(
+                speaker="heroine" if heroine_line else "protagonist", text=line
+            )
+            heroine_line = False
+        else:
+            yield Utterance(speaker="narrator", text=line)
+        if scene_change:
+            yield Utterance(speaker="(scene change)", text="")
+            scene_change = False
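And the same kind of sketch for the novel parser: only the dialogue line immediately after a `[voice]` tag is attributed to the heroine (again, fabricated lines):

```python
from ks_parse.chapter_novel import parse

lines = [
    "　静かな夜だった。[r]",    # narration: starts with a full-width space
    "[voice storage=...]",      # marks the next dialogue line as the heroine's
    "「ねえ、起きてる?」[p]",
    "「ああ、起きてるよ」[p]",  # no preceding [voice]: the protagonist
]
for utt in parse(lines):
    print(utt.speaker, utt.text)
# narrator 静かな夜だった。
# heroine 「ねえ、起きてる?」
# protagonist 「ああ、起きてるよ」
```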
ks_parse/ks.py ADDED
@@ -0,0 +1,36 @@
+from .chapter import Chapter, ChapterParser
+from .chapter_novel import parse as parse_chapter_novel
+from .chapter_adv import parse as parse_chapter_adv
+
+from typing import Iterable
+
+
+def parse(lines: Iterable[str], stype: str) -> Iterable[Chapter]:
+    """Entry point for parsing Kirikiri2 scripts"""
+
+    cparser: ChapterParser
+    match stype:
+        case "novel":
+            cparser = parse_chapter_novel
+        case "adv":
+            cparser = parse_chapter_adv
+        case _:
+            raise ValueError(f"Unknown script type: {stype}")
+
+    for line in lines:
+        if line.startswith("*"):
+            break
+
+    chunk = []
+    eof = False
+    while not eof:
+        ch = Chapter(title=line.removeprefix("*").strip(), content=[])
+        for line in lines:
+            if line.startswith("*"):
+                break
+            chunk.append(line)
+        else:
+            eof = True
+        ch.content = cparser(chunk)
+        chunk = []
+        yield ch
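On the chunking above: the first loop discards the header up to the first `*label` line, and each `while` iteration then collects lines until the next `*label`, with the `for`/`else` marking end of input. A toy run (fabricated labels and lines; note `parse` needs a shared iterator, which a file object already is):

```python
from ks_parse.ks import parse

script = iter([
    "; header, ignored",
    "*chapter1",
    "　最初の章。[p]",
    "*chapter2",
    "　次の章。[p]",
])
for ch in parse(script, "novel"):
    print(ch.title, [u.text for u in ch.content])
# chapter1 ['最初の章。']
# chapter2 ['次の章。']
```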
manual_seg-progress.md ADDED
@@ -0,0 +1,26 @@
+
+- [ ] 2015-sssa
+    - [x] A000
+    - [x] A001
+    - [x] A002
+    - [x] A003
+    - [ ] A004
+    - [ ] B000
+    - [ ] B001
+    - [ ] B002
+    - [ ] Z000
+    - [ ] Z001
+- [ ] 2016a-yubikiri
+- [x] 2016b-sssa2
+- [ ] 2017-otomari
+- [ ] 2018-harem
+    - [x] A000
+    - [x] A001
+    - [x] B000
+    - [ ] B001
+    - [ ] B002
+    - [ ] B003
+    - [ ] B004
+- [ ] 2019-aiyoku
+- [ ] 2020-yuuwaku
+- [ ] 2022-mainichi
script/dos2unix.sh ADDED
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+BASE=$1
+
+for f in "$BASE"/*.ks; do
+    perl -pe 's/\r\n|\n|\r/\n/g' "$f" > "$f.tmp"  # for 2015-sssa
+    mv "$f.tmp" "$f"
+done
script/transcode.sh ADDED
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+BASE=$1
+
+for f in "$BASE"/*.ks; do
+    iconv -f unicode -t utf-8 < "$f" > "$f.tmp"
+    #iconv -f gbk -t utf-8 < "$f" > "$f.tmp"  # for 2016a-yubikiri-old
+    mv "$f.tmp" "$f"
+done
+
segment/__init__.py ADDED
(empty file)
segment/auto/__main__.py ADDED
@@ -0,0 +1,100 @@
+"""Automatically segment the scenario into the final conversation format.
+SentenceTransformer embeddings + classifier
+
+e.g. from scenario/volume1/chapter1.jsonl:
+{"title": "..."}
+{"speaker": "(scene change)", "text": ""}
+{"speaker": "narrator", "text": "..."}
+{"speaker": "narrator", "text": "..."}
+{"speaker": "heroine", "text": "..."}
+{"speaker": "protagonist", "text": "..."}
+{"speaker": "heroine", "text": "..."}
+{"speaker": "protagonist", "text": "..."}
+...
+
+to conversation/volume1/chapter1.json:
+{
+  "title": "...",
+  "conversation": [
+    {
+      "request": [
+        {"speaker": "narrator", "text": "..."},
+        {"speaker": "narrator", "text": "..."},
+      ],
+      "response": [
+        {"speaker": "heroine", "text": "..."}
+      ]
+    },
+    {
+      "request": [
+        {"speaker": "protagonist", "text": "..."}
+      ],
+      "response": [
+        {"speaker": "heroine", "text": "..."}
+      ]
+    },
+    {
+      "request": [
+        {"speaker": "protagonist", "text": "..."},
+...
+"""
+from .classifier import train_classifier, label_scenario, Predictor
+from ..utils import dump_conversation, map_to_file_or_dir
+
+from sys import argv
+from pathlib import Path
+from itertools import chain
+import re
+
+
+def train(target_name: str) -> Predictor:
+    if re.search(r"201[567]", target_name) is not None:
+        tset = Path("./conversation-manual").glob("201[567]*/*.json")
+        print("Training on 2015-2017")
+    else:
+        tset = chain(
+            Path("./conversation-manual").glob("201[89]*/*.json"),
+            Path("./conversation-manual").glob("202[02]*/*.json"),
+        )
+        print("Training on 2018-2022")
+    return train_classifier(tset)
+
+
+def generate_seg(s_path: Path, model: Predictor):
+    ks_name = f"{s_path.parent.name}/{s_path.stem}"
+
+    output_base = Path("./conversation")
+    output_base.mkdir(exist_ok=True)
+    (output_base / s_path.parent.name).mkdir(exist_ok=True)
+
+    entries, labels, title = label_scenario(s_path, model)
+    # make sure to start with request and end with response
+    labels[0], labels[-1] = 0, 1
+
+    # Generate the conversation request-response pairs
+    key = "request"
+    current_round = dict(request=[], response=[])
+    conversation = []
+    for entry, label in zip(entries, labels):
+        if label == 0:  # request
+            if key == "response":
+                conversation.append(current_round)
+                current_round = dict(request=[], response=[])
+                key = "request"
+        else:  # response
+            if key == "request":
+                key = "response"
+        current_round[key].append(entry)
+    conversation.append(current_round)
+
+    dump_conversation(
+        {"title": title, "conversation": conversation}, output_base / f"{ks_name}.json"
+    )
+
+
+if __name__ == "__main__":
+    if len(argv) != 2:
+        print(f"Usage: python -m {__package__} path/to/scenario.jsonl")
+        exit(1)
+    model = train(argv[1])
+    map_to_file_or_dir(generate_seg, Path(argv[1]), model)
segment/auto/classifier.py ADDED
@@ -0,0 +1,77 @@
+from ..utils import is_protagonist
+from .similarity import batch_embedding
+
+import numpy as np
+from sklearn.ensemble import RandomForestClassifier
+
+from pathlib import Path
+import json
+from typing import Protocol, Iterable
+
+
+class Predictor(Protocol):
+    def predict(self, X: np.ndarray) -> np.ndarray:
+        ...
+
+
+def conversation_embedding_label(c_path: Path) -> tuple[np.ndarray, np.ndarray]:
+    """Get embeddings and labels for the narrator lines of a segmented conversation."""
+    d = json.loads(c_path.read_text())
+    conv, label, narr_mask = [], [], []
+    for pair in d["conversation"]:
+        for entry in pair["request"]:
+            conv.append(entry)
+            label.append(0)
+            narr_mask.append(entry["speaker"] == "narrator")
+        for entry in pair["response"]:
+            conv.append(entry)
+            label.append(1)
+            narr_mask.append(entry["speaker"] == "narrator")
+
+    em = batch_embedding(conv)
+    return em[narr_mask], np.array(label)[narr_mask]
+
+
+def train_classifier(c_paths: Iterable[Path]) -> Predictor:
+    """Train a classifier to label narrator lines as request or response."""
+    em, label = [], []
+    for c_path in c_paths:
+        em_, label_ = conversation_embedding_label(c_path)
+        em.append(em_)
+        label.append(label_)
+    em = np.concatenate(em)
+    label = np.concatenate(label)
+
+    model = RandomForestClassifier()
+    model.fit(em, label)
+    print(f"Accuracy: {model.score(em, label)}")  # accuracy on the training set
+    return model
+
+
+def label_scenario(
+    s_path: Path, model: Predictor
+) -> tuple[list[dict], np.ndarray, str]:
+    """Label the lines of an unsegmented scenario chapter."""
+    conv, role = [], []
+    with s_path.open("r") as fi:
+        title = json.loads(next(fi))["title"]
+        for line in fi:
+            entry = json.loads(line)
+            if entry["speaker"] == "(scene change)":
+                continue  # not recording scene changes, since the plots are usually connected
+            conv.append(entry)
+            if entry["speaker"] == "narrator":
+                role.append(2)
+            elif is_protagonist(entry["speaker"]):
+                role.append(0)
+            else:  # heroine
+                role.append(1)
+
+    role = np.array(role)
+    label = np.empty_like(role, dtype=int)
+    em = batch_embedding(conv)
+    label[role == 0] = 0
+    label[role == 1] = 1
+    label[role == 2] = model.predict(em[role == 2])
+
+    return conv, label, title
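Since `Predictor` is a structural `typing.Protocol`, anything exposing a compatible `predict` can stand in for the random forest; a toy stand-in for sanity checks (hypothetical, not part of the pipeline):

```python
import numpy as np

class MajorityBaseline:
    """Toy Predictor: calls every narrator line a request (label 0)."""

    def predict(self, X: np.ndarray) -> np.ndarray:
        return np.zeros(len(X), dtype=int)

# Structurally satisfies Predictor, so label_scenario(s_path, MajorityBaseline())
# type-checks without any registration or subclassing.
```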
segment/auto/similarity.py ADDED
@@ -0,0 +1,34 @@
+from ..utils import is_protagonist
+
+from sentence_transformers import SentenceTransformer
+import numpy as np
+
+model = SentenceTransformer("moka-ai/m3e-base")
+
+
+def sentence_embedding(s: list[str]) -> np.ndarray:
+    """Compute the sentence embeddings."""
+    return model.encode(s, convert_to_tensor=True).cpu().numpy()
+
+
+def batch_embedding(conv: list[dict]) -> np.ndarray:
+    """Compute the sentence embeddings for a list of utterances."""
+    sentences = []
+    for c in conv:
+        sen = c["text"]
+        if c["speaker"] != "narrator":
+            if is_protagonist(c["speaker"]):
+                sen = f"我:{sen}"
+            else:  # heroine
+                sen = f"她:{sen}"
+        sentences.append(sen)
+
+    em = sentence_embedding(sentences)
+
+    # Take a convolution with a window of size 3 (indices [-1, 0, 1], weights [0.2, 0.6, 0.2])
+    # in the hope of capturing some context around each sentence
+    em_conv = np.empty_like(em)
+    em_conv[1:-1] = 0.2 * em[:-2] + 0.6 * em[1:-1] + 0.2 * em[2:]
+    em_conv[0] = 0.8 * em[0] + 0.2 * em[1]
+    em_conv[-1] = 0.2 * em[-2] + 0.8 * em[-1]
+    return em_conv
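A note on the smoothing above: the two edge rows renormalize the truncated window (0.8/0.2) so the weights still sum to 1, and the interior rows are exactly a per-dimension 1-D convolution with kernel [0.2, 0.6, 0.2]. A standalone numeric check on fake embeddings:

```python
import numpy as np

rng = np.random.default_rng(0)
em = rng.normal(size=(5, 3))  # fake embeddings: 5 sentences, dim 3

em_conv = np.empty_like(em)
em_conv[1:-1] = 0.2 * em[:-2] + 0.6 * em[1:-1] + 0.2 * em[2:]
em_conv[0] = 0.8 * em[0] + 0.2 * em[1]
em_conv[-1] = 0.2 * em[-2] + 0.8 * em[-1]

# Interior rows agree with np.convolve in "same" mode, dimension by dimension.
ref = np.stack(
    [np.convolve(em[:, j], [0.2, 0.6, 0.2], mode="same") for j in range(em.shape[1])],
    axis=1,
)
assert np.allclose(em_conv[1:-1], ref[1:-1])
```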
segment/manual/__main__.py ADDED
@@ -0,0 +1,111 @@
+"""Parse the manual segmentation of the scenario into the final conversation format.
+
+e.g. from scenario-manual_seg/volume1/chapter1.jsonl:
+{"title": "..."}
+{"speaker": "(scene change)", "text": ""}
+{"speaker": "narrator", "text": "..."}
+{"speaker": "narrator", "text": "..."}
+
+{"speaker": "heroine", "text": "..."}
+
+{"speaker": "protagonist", "text": "..."}
+
+{"speaker": "heroine", "text": "..."}
+
+{"speaker": "protagonist", "text": "..."}
+...
+
+to conversation/volume1/chapter1.json:
+{
+  "title": "...",
+  "conversation": [
+    {
+      "request": [
+        {"speaker": "narrator", "text": "..."},
+        {"speaker": "narrator", "text": "..."},
+      ],
+      "response": [
+        {"speaker": "heroine", "text": "..."}
+      ]
+    },
+    {
+      "request": [
+        {"speaker": "protagonist", "text": "..."}
+      ],
+      "response": [
+        {"speaker": "heroine", "text": "..."}
+      ]
+    },
+    {
+      "request": [
+        {"speaker": "protagonist", "text": "..."},
+...
+"""
+from ..utils import dump_conversation, map_to_file_or_dir, is_protagonist
+
+import json
+from sys import argv
+from pathlib import Path
+from typing import TypedDict
+
+
+def generate_seg(ks_path: Path):
+    ks_name = f"{ks_path.parent.name}/{ks_path.stem}"
+
+    output_base = Path("./conversation")
+    output_base.mkdir(exist_ok=True)
+    (output_base / ks_path.parent.name).mkdir(exist_ok=True)
+
+    d = TypedDict("d", {"title": str, "conversation": list})(
+        {"title": "", "conversation": []}
+    )
+    key = "request"
+    current_round = dict(request=[], response=[])
+    current_scene = ""
+    warned = 5
+    with ks_path.open("r") as fi:
+        d["title"] = json.loads(next(fi))["title"]
+
+        for lineno, line in enumerate(fi, start=2):
+            line = line.strip()
+            if not line:  # segmentation point
+                if key == "request":
+                    key = "response"
+                else:
+                    d["conversation"].append(current_round)
+                    current_round = dict(request=[], response=[])
+                    key = "request"
+                continue
+
+            entry = json.loads(line)
+            if entry["speaker"] == "(scene change)":
+                dump_conversation(d, output_base / f"{ks_name}.json")
+                assert (
+                    not current_round["request"] and not current_round["response"]
+                ), f"Unexpected scene change at line {lineno} in {ks_path}"
+                current_scene = entry["text"]
+                continue
+            current_round[key].append(entry)
+            if current_scene:
+                current_round["scene"] = current_scene
+            if entry["speaker"] != "narrator":
+                if warned > 0 and (key == "request") != is_protagonist(
+                    entry["speaker"]
+                ):
+                    warned -= 1
+                    print(f"WARN: Unexpected speaker at line {lineno}")
+
+    if current_round["request"]:
+        assert current_round["response"], "Unexpected end of file"
+        d["conversation"].append(current_round)
+    else:
+        assert d["conversation"][-1]["response"], "Unexpected end of file"
+
+    dump_conversation(d, output_base / f"{ks_name}.json")
+
+
+if __name__ == "__main__":
+    if len(argv) != 2:
+        print(f"Usage: python -m {__package__} path/to/scenario_manual_seg.jsonl")
+        exit(1)
+    map_to_file_or_dir(generate_seg, Path(argv[1]))
segment/utils.py ADDED
@@ -0,0 +1,48 @@
+import re
+import json
+from pathlib import Path
+from typing import Callable, Concatenate
+
+ENTRY = re.compile(r"{\n\s+\"speaker\": \"(.*)\",\n\s+\"text\": \"(.*)\"\n\s+}", re.M)
+
+
+def compact_entry(output: str) -> str:
+    """
+    Collapse the following:
+        {
+          "speaker": "...",
+          "text": "..."
+        },
+    into this:
+        {"speaker": "...", "text": "..."},
+    """
+    return ENTRY.sub(r'{"speaker": "\1", "text": "\2"}', output)
+
+
+def dump_conversation(conversation, output_path: Path):
+    with output_path.open("w") as fo:
+        output = json.dumps(conversation, ensure_ascii=False, indent=2)
+        output = compact_entry(output)
+        print(output, file=fo)
+
+
+def map_to_file_or_dir[**P](
+    fn: Callable[Concatenate[Path, P], None],
+    path: Path,
+    *args: P.args,
+    **kwargs: P.kwargs,
+):
+    if path.is_dir():
+        for p in path.iterdir():
+            print(f"Processing {p}")
+            fn(p, *args, **kwargs)
+    else:
+        fn(path, *args, **kwargs)
+
+
+def is_protagonist(speaker: str) -> bool:
+    return speaker == "protagonist" or speaker == "我"
+
+
+def is_heroine(speaker: str) -> bool:
+    return not (is_protagonist(speaker) or speaker == "narrator")
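
A quick round trip showing why `compact_entry` exists: `json.dumps(..., indent=2)` spreads each utterance over four lines, and the regex folds it back so the dumped files stay one-utterance-per-line and easy to hand-edit (sample data made up):

```python
from segment.utils import compact_entry
import json

round_ = {
    "request": [{"speaker": "narrator", "text": "……"}],
    "response": [{"speaker": "heroine", "text": "「はい」"}],
}
print(compact_entry(json.dumps(round_, ensure_ascii=False, indent=2)))
# {
#   "request": [
#     {"speaker": "narrator", "text": "……"}
#   ],
#   "response": [
#     {"speaker": "heroine", "text": "「はい」"}
#   ]
# }
```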
|