import datasets
import numpy as np
from sklearn.preprocessing import MinMaxScaler

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.},
year={2020}
}
"""


class Reuters10K(datasets.GeneratorBasedBuilder):
    """Reuters10K: min-max-scaled feature vectors with integer class labels,
    read from NumPy archives (train.npy / test.npy)."""

    VERSION = datasets.Version("0.0.1")

    def _info(self):
        return datasets.DatasetInfo(
            description="Reuters10K dataset",
            features=datasets.Features(
                {
                    "features": datasets.Sequence(datasets.Value("float32")),
                    "label": datasets.Value("int64"),
                }
            ),
            citation=_CITATION,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(
            {"train": "train.npy", "test": "test.npy"}
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        # Each .npy file stores a pickled dict with "data" and "label" arrays.
        archive = np.load(filepath, allow_pickle=True).item()
        X = archive["data"]
        Y = archive["label"]

        # NOTE: the scaler is fit on each split independently, so train and
        # test end up on slightly different scales; to share one scaling,
        # fit the scaler on the train split and reuse it for the test split.
        scaler = MinMaxScaler()
        X = scaler.fit_transform(X)

        for i, (x, y) in enumerate(zip(X, Y)):
            yield i, {"features": x, "label": y}
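

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original loader):
    # loading this script in place requires train.npy and test.npy in the
    # same directory, and trust_remote_code=True on recent `datasets`
    # releases; on the Hub, pass the repo id instead of __file__.
    from datasets import load_dataset

    ds = load_dataset(__file__, trust_remote_code=True)
    print(ds)
    example = ds["train"][0]
    print(example["label"], len(example["features"]))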