# reddit_climate_comment/reddit_climate_comment.py
# Uploaded by cathw (commit 593feea, verified) — Hugging Face dataset loading script.
import csv
import json
import os
from datasets import GeneratorBasedBuilder, Features, Value, Sequence, SplitGenerator, BuilderConfig, DatasetInfo, Split
import logging
import pandas as pd
from typing import Dict
CITATION = ""
_DESCRIPTION = "Demo"
_URL = ""
_HOMEPAGE = ""
_LICENSE = ""
_URL = "https://github.com/catherine-ywang/reddit_climate_comment_data/raw/main/climate_comments.csv.zip"
class NewDataset(GeneratorBasedBuilder):
    """Dataset builder for Reddit climate-change posts with nested comments.

    Each example is one Reddit post; its comments are nested under it, and
    each comment carries the replies made to it. The source is a single
    zipped CSV that is denormalized to one row per (post, comment, reply)
    combination — Comment*/Reply* columns are empty (NaN) when a post has
    no comments or a comment has no replies.
    """

    def _info(self):
        """Declare the post -> comments -> replies feature schema."""
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "id": Value("string"),
                "post_title": Value("string"),
                "post_author": Value("string"),
                "post_body": Value("string"),
                "post_url": Value("string"),
                "post_pic": Value("string"),
                "subreddit": Value("string"),
                "post_timestamp": Value("string"),
                "post_upvotes": Value("int32"),
                "post_permalink": Value("string"),
                "comments": Sequence({
                    "CommentID": Value("string"),
                    "CommentAuthor": Value("string"),
                    "CommentBody": Value("string"),
                    "CommentTimestamp": Value("string"),
                    "CommentUpvotes": Value("int32"),
                    "CommentPermalink": Value("string"),
                    "replies": Sequence({
                        "ReplyID": Value("string"),
                        "ReplyAuthor": Value("string"),
                        "ReplyBody": Value("string"),
                        "ReplyTimestamp": Value("string"),
                        "ReplyUpvotes": Value("int32"),
                        "ReplyPermalink": Value("string"),
                    }),
                }),
            }),
            homepage=_HOMEPAGE,
            # Surface the module-level constants instead of dropping them.
            citation=CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the zipped CSV and expose a single train split."""
        path = dl_manager.download_and_extract(_URL)
        # os.path.join instead of string concatenation for portability.
        csv_path = os.path.join(path, "climate_comments.csv")
        return [SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": csv_path})]

    def _generate_examples(self, filepath):
        """Yield (post_id, example) pairs, nesting comments and their replies.

        Args:
            filepath: path to the extracted climate_comments.csv.

        Yields:
            (post_id, dict) matching the schema declared in ``_info``.
        """
        df = pd.read_csv(filepath)
        for post_id, group in df.groupby('PostID'):
            post_data = group.iloc[0]  # post-level columns repeat on every row
            comments = []
            # dropna(): a post with no comments carries NaN in CommentID, and
            # since NaN != NaN the equality filter below would yield an empty
            # frame, making iloc[0] raise IndexError.
            for comment_id in group['CommentID'].dropna().unique():
                # Scope the lookup to this post's group (not the whole df) so
                # a CommentID reused in another post cannot leak its replies in.
                comment_rows = group[group['CommentID'] == comment_id]
                comment_data = comment_rows.iloc[0]
                # Skip rows with no actual reply: NaN ReplyID rows would
                # otherwise emit junk records that violate the string schema.
                reply_rows = comment_rows[comment_rows['ReplyID'].notna()]
                replies = [
                    {
                        "ReplyID": reply_data['ReplyID'],
                        "ReplyAuthor": reply_data['ReplyAuthor'],
                        "ReplyBody": reply_data['ReplyBody'],
                        "ReplyTimestamp": reply_data['ReplyTimestamp'],
                        "ReplyUpvotes": reply_data['ReplyUpvotes'],
                        "ReplyPermalink": reply_data['ReplyPermalink'],
                    }
                    for _, reply_data in reply_rows.iterrows()
                ]
                comments.append({
                    "CommentID": comment_id,
                    "CommentAuthor": comment_data['CommentAuthor'],
                    "CommentBody": comment_data['CommentBody'],
                    "CommentTimestamp": comment_data['CommentTimestamp'],
                    "CommentUpvotes": comment_data['CommentUpvotes'],
                    "CommentPermalink": comment_data['CommentPermalink'],
                    "replies": replies,
                })
            yield post_id, {
                "id": post_id,
                "post_title": post_data['PostTitle'],
                "post_author": post_data['PostAuthor'],
                "post_body": post_data['PostBody'],
                "post_url": post_data['PostUrl'],
                "post_pic": post_data['PostPic'],
                "subreddit": post_data['Subreddit'],
                "post_timestamp": post_data['PostTimestamp'],
                "post_upvotes": post_data['PostUpvotes'],
                "post_permalink": post_data['PostPermalink'],
                "comments": comments,
            }