|
import datasets |
|
from fastcore.utils import compose |
|
from clean_funcs import * |
|
|
|
# Load the Finnish ("fi") configuration of the multilingual C4 web-crawl
# corpus; yields a DatasetDict with "train" and "validation" splits
# (printed below for a quick sanity check of sizes/columns).
fi_mc4 = datasets.load_dataset("mc4", "fi")

print(fi_mc4)
|
|
|
# Text-normalisation pipeline applied to every raw mC4 document (via
# `Dataset.map`): HTML unescaping, control-character stripping,
# unicode-symbol removal, punctuation standardisation, news-tag / URL /
# username scrubbing, duplicate-word removal, and whitespace collapsing.
# `compose` chains them into a single callable, so the functions are passed
# directly — the previous `compose(*[...])` splat of a literal list was
# redundant.
data_preprocessing_funcs = compose(
    fix_html,
    remove_control_char,
    remove_remaining_control_chars,
    remove_unicode_symbols,
    standardise_punc,
    remove_news_tags,
    replace_urls,
    replace_usernames,
    remove_duplicate_words_punctuation,
    remove_multi_space,
)

# Per-example statistics used later for quality filtering: character-class
# counts, string length, language prediction, and the derived ratios.
data_stats_funcs = compose(
    count_alphabet,
    count_numbers,
    count_upper,
    count_str_len,
    predict_lang,
    calculate_alphabet_ratio,
    calculate_number_ratio,
    calculate_upper_ratio,
)
|
|
|
# Quality-filter thresholds for keeping a document (tuned for Finnish web text):
min_alphabet_ratio = 0.75  # at least 75% alphabetic characters

max_upper_ratio = 0.10  # at most 10% upper-case characters

max_number_ratio = 0.05  # at most 5% digit characters

# Minimum language-ID confidence; the '__label__fi' comparison below suggests
# a fastText-style classifier — confirm against clean_funcs.predict_lang.
min_pred_lang_percentage = 0.95
|
|
|
|
|
# --- Train split: normalise, length-filter, and quality-filter -------------
row_count = fi_mc4["train"].num_rows
print(f"Original dataset train rows {row_count}")

# Run the composed text-normalisation pipeline over the raw documents.
fi_mc4["train"] = fi_mc4["train"].map(
    data_preprocessing_funcs,
    num_proc=64,
    batched=True,
    writer_batch_size=100000,
)

# Keep only documents with at least 20 whitespace-separated tokens.
fi_train_longer = fi_mc4["train"].filter(
    lambda example: len(example["text"].split()) >= 20,
    num_proc=64,
)
print(f"Only longer texts dataset train rows {fi_train_longer.num_rows}")

# Attach per-example statistics (character-class counts, ratios, language ID).
fi_train_longer = fi_train_longer.map(
    data_stats_funcs,
    num_proc=64,
    batched=False,
    writer_batch_size=100000,
)


def _keeps_train_example(example):
    """True when the example clears every ratio/language-ID threshold."""
    return (
        example["alphabet_ratio"] > min_alphabet_ratio
        and example["upper_ratio"] < max_upper_ratio
        and example["number_ratio"] < max_number_ratio
        and example["predicted_lang"] == "__label__fi"
        and example["predicted_lang_percentage"] > min_pred_lang_percentage
    )


fi_train_cleaned = fi_train_longer.filter(_keeps_train_example, num_proc=64)
print(f"Final cleaned dataset train rows {fi_train_cleaned.num_rows}")
|
|
|
|
|
# --- Validation split: same normalise/filter pipeline as the train split ---
num_rows = fi_mc4["validation"].num_rows
print(f"Original dataset val rows {num_rows}")

# Normalise the raw text. `writer_batch_size` now matches the train-split
# call (it was previously omitted only here — an inconsistency, not a
# behavioural difference in the output data).
fi_mc4["validation"] = fi_mc4["validation"].map(
    data_preprocessing_funcs, num_proc=64, batched=True,
    writer_batch_size=100000)

# Drop very short documents (< 20 whitespace-separated tokens).
fi_val_only_longer = fi_mc4["validation"].filter(
    lambda example: len(example['text'].split()) >= 20, num_proc=64)
num_rows = fi_val_only_longer.num_rows
print(f"Only longer texts dataset val rows {num_rows}")

# Attach per-example quality statistics (counts, ratios, language ID).
fi_val_only_longer = fi_val_only_longer.map(
    data_stats_funcs, num_proc=64, batched=False, writer_batch_size=100000)

# Keep examples that clear every ratio threshold and are confidently
# identified as Finnish.
fi_val_cleaned = fi_val_only_longer.filter(
    lambda example: example['alphabet_ratio'] > min_alphabet_ratio
    and example['upper_ratio'] < max_upper_ratio
    and example['number_ratio'] < max_number_ratio
    and example['predicted_lang'] == '__label__fi'
    and example['predicted_lang_percentage'] > min_pred_lang_percentage,
    num_proc=64)
num_rows = fi_val_cleaned.num_rows
print(f"Final cleaned dataset val rows {num_rows}")
|
|
|
|
|
# The statistics columns were only needed for filtering; drop them before
# export so the CSVs contain just the original mC4 fields. One shared list
# replaces the nine-element list that was previously duplicated verbatim for
# each split.
_stats_columns = [
    "alphabet_len", "number_len", "upper_len", "total_len",
    "predicted_lang", "predicted_lang_percentage",
    "alphabet_ratio", "number_ratio", "upper_ratio",
]
fi_train_cleaned = fi_train_cleaned.remove_columns(_stats_columns)
fi_val_cleaned = fi_val_cleaned.remove_columns(_stats_columns)

# Write the cleaned splits; `index=False` is forwarded to pandas' to_csv so
# no row-index column is written.
fi_train_cleaned.to_csv("train.csv", num_proc=64, index=False)
fi_val_cleaned.to_csv("valid.csv", num_proc=64, index=False)
|
|