import re
from pathlib import Path

import pandas as pd
from datasets import Dataset


def generate_chunks(text: str, chunk_size: int = 128) -> list[str]:
    """Greedily pack sentences into chunks of roughly chunk_size whitespace-separated tokens."""
    sentences = re.split("[?.!]", text)
    chunks = []
    current_chunk_tokens = []
    for sentence in sentences:
        tokens = sentence.split()
        # Use the chunk_size parameter instead of a hardcoded limit
        if (len(current_chunk_tokens) + len(tokens)) <= chunk_size:
            current_chunk_tokens.extend(tokens)
        else:
            chunks.append(" ".join(current_chunk_tokens))
            current_chunk_tokens = [*tokens]
    # Keep the final, partially filled chunk instead of silently dropping it
    if current_chunk_tokens:
        chunks.append(" ".join(current_chunk_tokens))
    return chunks


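# Read the corpus: each filename encodes year, author and work, separated by underscores.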
textfiles = Path("Corpus-v1.1/texts").glob("*.txt")
entries = []
for file in textfiles:
    year, author, work, *_ = file.stem.split("_")
    with file.open() as in_file:
        text = in_file.read()
        entries.append(dict(year=year, author=author, work=work, text=text))

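# One row per work; chunk each text and explode so every row holds a single chunk.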
data = pd.DataFrame.from_records(entries)
data["full_title"] = data["author"] + " - " + data["work"]
data["text"] = data["text"].map(generate_chunks)
data = data.explode("text")

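# Reproducibly sample n_works works, then n_chunks_per_work chunks from each sampled work
# (groupby-sample assumes every sampled work has at least n_chunks_per_work chunks).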
seed = 42
n_works = 64
n_chunks_per_work = 32
sampled_titles = pd.Series(data["full_title"].unique()).sample(
    n_works, random_state=seed
)
sampled_data = data[data["full_title"].isin(sampled_titles)]
sampled_data = sampled_data.groupby(["full_title"]).sample(
    n_chunks_per_work, random_state=seed
)

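# Build a Hugging Face dataset (reset_index turns the original row index into an "index" column),
# shuffle it, and publish it to the Hub.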
ds = Dataset.from_pandas(
    sampled_data[["year", "author", "work", "text", "full_title"]].reset_index()
).shuffle(seed=seed)
ds.push_to_hub("kardosdrur/historical-danish-clustering")