# hatebr/sampling.py
import json

import numpy as np
import pandas as pd
from skmultilearn.model_selection import iterative_train_test_split

from hatebr import process_row

DATASET_URL = "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv"


def generate_stratified_indexes(df, y_column="offensive_language"):
    """Generate stratified train, validation, and test indexes for the given DataFrame.

    Args:
        df: A pandas DataFrame.
        y_column: Name of the column holding the target variable
            (default: "offensive_language").

    Returns:
        A tuple of numpy arrays containing the train, validation, and test
        indexes for the input DataFrame:
            X_train_indexes: Indexes of the train data.
            X_dev_indexes: Indexes of the validation data.
            X_test_indexes: Indexes of the test data.
            y_train_indexes: Indexes of the train target values.
            y_dev_indexes: Indexes of the validation target values.
            y_test_indexes: Indexes of the test target values.
    """
    # Preprocess every row with the project's process_row helper before splitting.
    records = df.to_dict("records")
    processed_records = [process_row(row, None) for row in records]
    processed_df = pd.DataFrame(processed_records)

    # Append the original row position as an extra column to both X and y so
    # the indexes of each split can be recovered after stratification.
    y = processed_df[[y_column]].to_numpy().astype(np.int32)
    indices = np.arange(y.shape[0])
    indices = indices.reshape(indices.shape[0], 1)
    y = np.append(y, indices, axis=1)

    processed_df.drop(columns=[y_column], inplace=True)
    X = processed_df.to_numpy().astype(np.int32)
    X = np.append(X, indices, axis=1)
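
    # Two-stage iterative stratified split: hold out 20% for test, then take
    # 20% of the remaining 80% for validation, i.e. roughly
    # 64% train / 16% validation / 20% test.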
    X_train_dev, y_train_dev, X_test, y_test = iterative_train_test_split(X, y, test_size=0.2)
    X_train, y_train, X_dev, y_dev = iterative_train_test_split(X_train_dev, y_train_dev, test_size=0.2)

    # The last column of each array holds the original row index appended above.
    X_train_indexes = X_train[:, -1]
    X_dev_indexes = X_dev[:, -1]
    X_test_indexes = X_test[:, -1]
    y_train_indexes = y_train[:, -1]
    y_dev_indexes = y_dev[:, -1]
    y_test_indexes = y_test[:, -1]
    return X_train_indexes, X_dev_indexes, X_test_indexes, y_train_indexes, y_dev_indexes, y_test_indexes


def main():
    df = pd.read_csv(DATASET_URL)
    # The comment text itself is not needed to compute the stratified split,
    # so only the annotation columns are kept.
    df.drop(columns=["instagram_comments"], inplace=True)
    X_train_indexes, X_dev_indexes, X_test_indexes, y_train_indexes, y_dev_indexes, y_test_indexes = generate_stratified_indexes(df)
    final_indexes = {
        "train": [int(x) for x in X_train_indexes],
        "validation": [int(x) for x in X_dev_indexes],
        "test": [int(x) for x in X_test_indexes],
    }
    with open("indexes.json", "w") as f:
        json.dump(final_indexes, f)
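

# A minimal usage sketch, not part of the original script: it assumes that
# indexes.json has already been written by main() and that HateBR.csv is
# re-read with the same row order, so the saved indexes can be applied with
# df.iloc. The helper name load_splits is hypothetical.
def load_splits(indexes_path="indexes.json"):
    """Return (train_df, validation_df, test_df) built from the saved indexes."""
    df = pd.read_csv(DATASET_URL)
    with open(indexes_path) as f:
        indexes = json.load(f)
    return (
        df.iloc[indexes["train"]],
        df.iloc[indexes["validation"]],
        df.iloc[indexes["test"]],
    )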


if __name__ == "__main__":
    main()