import datasets
from sklearn.preprocessing import MinMaxScaler
import numpy as np

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_URL = "https://huggingface.co/datasets/wwydmanski/reuters10k/raw/main/"


class Reuters10K(datasets.GeneratorBasedBuilder):
    """Reuters10K dataset loading script."""

    VERSION = datasets.Version("0.0.1")

    def _info(self):
        return datasets.DatasetInfo(
            description="Reuters10K dataset",
            features=datasets.Features(
                {
                    "features": datasets.Sequence(datasets.Value("float32")),
                    "label": datasets.Value("int64"),
                }
            ),
            citation=_CITATION,
            version=Reuters10K.VERSION,
        )

    def _split_generators(self, dl_manager):
        # `download_and_extract` applied to a dict returns a dict of local
        # paths keyed the same way, so each split can be looked up by name.
        urls = {
            "train": _URL + "train.npy",
            "test": _URL + "test.npy",
        }
        downloaded = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples from a .npy file holding a dict with 'data' and 'label' keys."""
        dataset = np.load(filepath, allow_pickle=True).item()
        features = dataset["data"]
        labels = dataset["label"]

        # Note: the scaler is fit independently on each split, so train and
        # test features are scaled with different statistics.
        scaler = MinMaxScaler()
        features = scaler.fit_transform(features)

        for i, (x, y) in enumerate(zip(features, labels)):
            yield i, {"features": x, "label": y}
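

# -- Usage sketch --------------------------------------------------------------
# A minimal, hedged example of consuming this script, assuming it is published
# on the Hub under "wwydmanski/reuters10k" (the repo that _URL above points at).
# `load_dataset` is the standard `datasets` entry point; the split names match
# the generators defined in `_split_generators`.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("wwydmanski/reuters10k")
#     print(ds["train"][0]["label"])           # integer class label
#     print(len(ds["train"][0]["features"]))   # dimensionality of one feature vector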