Datasets:
"""TODO: Add a description here.""" | |
import csv
import json
import logging
import os

import datasets

from csvtransformerjson import CSVtoJSONTransformer

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
  title  = {A great new dataset},
  author = {huggingface, Inc.},
  year   = {2024}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
Reddit posts and their comments from climate-related subreddits, grouped by subreddit, post, and comment.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_URLS = {
    "reddit_climate": "cathw/comment_data"
}

# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
class RedditClimate(datasets.GeneratorBasedBuilder):
    """Reddit posts and comments from climate-related subreddits."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
    # If you need to make complex sub-parts in the dataset with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig
    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="reddit_climate",
            version=VERSION,
            description="Reddit posts and comments about climate change, grouped by subreddit",
        ),
    ]

    DEFAULT_CONFIG_NAME = "reddit_climate"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        features = datasets.Features({
            "Subreddit": datasets.Value("string"),
            "Posts": datasets.Sequence({
                "PostID": datasets.Value("int32"),
                "PostTitle": datasets.Value("string"),
                "Comments": datasets.Sequence({
                    "CommentID": datasets.Value("string"),
                    "Author": datasets.Value("string"),
                    "CommentBody": datasets.Value("string"),
                    "Timestamp": datasets.Value("string"),
                    "Upvotes": datasets.Value("int32"),
                    "NumberofReplies": datasets.Value("int32"),
                }),
            }),
        })
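        # For illustration only (not part of the original script): a single example
        # matching these features looks roughly like
        #     {"Subreddit": "climate",
        #      "Posts": [{"PostID": 1, "PostTitle": "...", "Comments": [{...}, ...]}]}
        # A `datasets.Sequence` over a dict is stored as a dict of lists, but a list of
        # dicts yielded from `_generate_examples` is converted automatically.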
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Defined above; this script exposes a single configuration
            # If there's a common (input, target) tuple in the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It accepts any nested list/dict of URLs and gives back the same structure with the URLs replaced by paths to local files.
        # By default, archives are extracted and the path to the cached folder where they were extracted is returned instead of the archive.
        config_name = getattr(self.config, "name", self.DEFAULT_CONFIG_NAME)
        urls = _URLS.get(config_name, {})  # URLs for the selected configuration; an empty dict if the name is unknown
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]
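    # For illustration only (not part of the original script): if separate validation or
    # test files were available, additional SplitGenerator entries could be returned
    # alongside the train split, e.g.
    #     datasets.SplitGenerator(
    #         name=datasets.Split.VALIDATION,
    #         gen_kwargs={"filepath": validation_path, "split": "validation"},
    #     )
    # where `validation_path` is a hypothetical path returned by dl_manager.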
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        with open(filepath, encoding="utf-8") as f:
            csv_reader = csv.reader(f)
            # CSVtoJSONTransformer is iterated below; it is expected to yield one nested
            # dict per subreddit, matching the features declared in _info.
            data = CSVtoJSONTransformer(csv_reader)
            for idx, row in enumerate(data):
                subreddit = row["Subreddit"]
                posts = []
                # Rows without a "Posts" key keep an empty list so the example still
                # matches the declared Sequence feature.
                if "Posts" in row:
                    for post in row["Posts"]:
                        post_id = post["PostID"]
                        post_title = post["PostTitle"]
                        comments = []
                        for comment in post["Comments"]:
                            comments.append({
                                "CommentID": comment["CommentID"],
                                "Author": comment["Author"],
                                "CommentBody": comment["CommentBody"],
                                "Timestamp": comment["Timestamp"],
                                "Upvotes": comment["Upvotes"],
                                "NumberofReplies": comment["NumberofReplies"],
                            })
                        posts.append({
                            "PostID": post_id,
                            "PostTitle": post_title,
                            "Comments": comments,
                        })
                yield idx, {
                    "Subreddit": subreddit,
                    "Posts": posts,
                }
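

# A minimal usage sketch (not part of the original script). It assumes this file is saved
# locally as, e.g., "reddit_climate.py" (a hypothetical filename); depending on your
# `datasets` version you may also need to pass trust_remote_code=True to run a local
# loading script.
if __name__ == "__main__":
    dataset = datasets.load_dataset("reddit_climate.py", name="reddit_climate", split="train")
    example = dataset[0]
    print(example["Subreddit"])
    # Sequence-of-dict columns come back as dicts of lists, so the post titles of the
    # first subreddit are a plain list of strings.
    print(example["Posts"]["PostTitle"][:3])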