import re
from pathlib import Path

import pandas as pd
from datasets import Dataset

def generate_chunks(text: str, chunk_size: int = 128) -> list[str]:
    """Split a text into chunks of at most chunk_size whitespace tokens,
    breaking only at sentence boundaries."""
    sentences = re.split(r"[?.!]", text)
    chunks = []
    current_chunk_tokens = []
    for sentence in sentences:
        tokens = sentence.split()
        if (len(current_chunk_tokens) + len(tokens)) <= chunk_size:
            current_chunk_tokens.extend(tokens)
        else:
            chunks.append(" ".join(current_chunk_tokens))
            current_chunk_tokens = [*tokens]
    # Keep the final, partially filled chunk instead of silently dropping it.
    if current_chunk_tokens:
        chunks.append(" ".join(current_chunk_tokens))
    return chunks

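# Read every text file in the corpus. File stems are expected to follow the
# pattern <year>_<author>_<work>[_...]; anything after the work name is ignored.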
textfiles = Path("Corpus-v1.1/texts").glob("*.txt")
entries = []
for file in textfiles:
    year, author, work, *_ = file.stem.split("_")
    with file.open() as in_file:
        text = in_file.read()
    entries.append(dict(year=year, author=author, work=work, text=text))

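# Build one row per work, then chunk each text and explode so that every
# chunk becomes its own row.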
data = pd.DataFrame.from_records(entries)
data["full_title"] = data["author"] + " - " + data["work"]
data["text"] = data["text"].map(generate_chunks)
data = data.explode("text")

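# Subsample a fixed number of works, then a fixed number of chunks per work,
# so every sampled work contributes the same number of chunks.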
seed = 42
n_works = 64
n_chunks_per_work = 32

sampled_titles = pd.Series(data["full_title"].unique()).sample(
    n_works, random_state=seed
)
sampled_data = data[data["full_title"].isin(sampled_titles)]
sampled_data = sampled_data.groupby(["full_title"]).sample(
    n_chunks_per_work, random_state=seed
)

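# Convert to a Hugging Face Dataset, shuffle, and publish it to the Hub.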
ds = Dataset.from_pandas(
    # drop=True keeps the stale pandas index from ending up as an extra column
    sampled_data[["year", "author", "work", "text", "full_title"]].reset_index(
        drop=True
    )
).shuffle(seed=seed)
ds.push_to_hub("kardosdrur/historical-danish-clustering")
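
# To reuse the published dataset elsewhere (a sketch; assumes the push above
# succeeded and that you have access to the repo):
#
#     from datasets import load_dataset
#     ds = load_dataset("kardosdrur/historical-danish-clustering")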