"""Magic"""
from typing import List
from functools import partial
import datasets
import pandas
VERSION = datasets.Version("1.0.0")
_BASE_FEATURE_NAMES = [
    "major_axis_length",
    "minor_axis_length",
    "log_of_sum_of_content",
    "ratio_of_sum_of_highest_pixels_and_size",
    "ratio_of_highest_pixel_and_size",
    "projected_distance_highest_to_center_pixel",
    "third_root_of_third_moment_along_major_axis",
    "third_root_of_third_moment_along_minor_axis",
    "angle_major_axis_to_origin",
    "distance_origin_to_center",
    "class"
]
# Class label encoding: the raw UCI file marks events as "g" (gamma, signal) or
# "h" (hadron, background). Mapping h -> 0 ("no") and g -> 1 ("yes") is an
# assumed alignment with the ClassLabel names declared below.
_ENCODING_DICS = {
    "class": {"h": 0, "g": 1}
}
DESCRIPTION = "Magic dataset from the UCI ML repository."
_HOMEPAGE = "https://archive.ics.uci.edu/ml/datasets/Magic"
_URLS = ("https://archive.ics.uci.edu/ml/datasets/Magic")
_CITATION = """
@misc{misc_magic_gamma_telescope_159,
author = {Bock,R.},
title = {{MAGIC Gamma Telescope}},
year = {2007},
howpublished = {UCI Machine Learning Repository},
note = {{DOI}: \\url{10.24432/C52C8B}}
}"""
# Dataset info
urls_per_split = {
"train": "https://huggingface.co/datasets/mstz/magic/raw/main/magic04.data"
}
features_types_per_config = {
"magic": {
"major_axis_length": datasets.Value("float64"),
"minor_axis_length": datasets.Value("float64"),
"log_of_sum_of_content": datasets.Value("float64"),
"ratio_of_sum_of_highest_pixels_and_size": datasets.Value("float64"),
"ratio_of_highest_pixel_and_size": datasets.Value("float64"),
"projected_distance_highest_to_center_pixel": datasets.Value("float64"),
"third_root_of_third_moment_along_major_axis": datasets.Value("float64"),
"third_root_of_third_moment_along_minor_axis": datasets.Value("float64"),
"angle_major_axis_to_origin": datasets.Value("float64"),
"distance_origin_to_center": datasets.Value("float64"),
"class": datasets.ClassLabel(num_classes=2, names=("no", "yes"))
}
}
features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
class MagicConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super(MagicConfig, self).__init__(version=VERSION, **kwargs)
        self.features = features_per_config[kwargs["name"]]
class Magic(datasets.GeneratorBasedBuilder):
    # Dataset configurations
    DEFAULT_CONFIG = "magic"
    BUILDER_CONFIGS = [
        MagicConfig(name="magic",
                    description="Magic for binary classification.")
    ]
    def _info(self):
        info = datasets.DatasetInfo(description=DESCRIPTION, citation=_CITATION, homepage=_HOMEPAGE,
                                    features=features_per_config[self.config.name])
        return info
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        downloads = dl_manager.download_and_extract(urls_per_split)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
        ]
    def _generate_examples(self, filepath: str):
        if self.config.name == "magic":
            # magic04.data is assumed to be the raw UCI file: comma-separated values, no header row.
            data = pandas.read_csv(filepath, header=None)
            data = self.preprocess(data, config=self.config.name)
            for row_id, row in data.iterrows():
                data_row = dict(row)
                yield row_id, data_row
        else:
            raise ValueError(f"Unknown config: {self.config.name}")
    def encodings(self):
        # Tabular view of the categorical encodings applied in preprocess.
        data = [pandas.DataFrame([(feature, original_value, encoded_value)
                                  for original_value, encoded_value in d.items()],
                                 columns=["feature", "original_value", "encoded_value"])
                for feature, d in _ENCODING_DICS.items()]
        data = pandas.concat(data, axis="rows").reset_index(drop=True)
        return data
    def preprocess(self, data: pandas.DataFrame, config: str = DEFAULT_CONFIG) -> pandas.DataFrame:
        # Name the raw columns and encode the class label.
        data.columns = _BASE_FEATURE_NAMES
        for feature in _ENCODING_DICS:
            encoding_function = partial(self.encode, feature)
            data.loc[:, feature] = data[feature].apply(encoding_function)
        if config == "magic":
            return data[list(features_types_per_config["magic"].keys())]
        else:
            raise ValueError(f"Unknown config: {config}")
    def encode(self, feature, value):
        if feature in _ENCODING_DICS:
            return _ENCODING_DICS[feature][value]
        raise ValueError(f"Unknown feature: {feature}")
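

# Usage sketch, not part of the loader itself. Assumptions: the script runs with
# a `datasets` release that still supports dataset loading scripts (newer versions
# may require passing trust_remote_code=True), and the Hub repository id
# "mstz/magic" matches where this script is published.
if __name__ == "__main__":
    magic = datasets.load_dataset("mstz/magic", "magic", split="train")
    print(magic.features)
    print(magic[0])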