reddit_climate_comment / reddit_climate_comment.py
cathw's picture
Upload reddit_climate_comment.py
cf63bc2 verified
raw
history blame
4.85 kB
import csv
import json
import os
from datasets import GeneratorBasedBuilder, Features, Value, Sequence, SplitGenerator, BuilderConfig, DatasetInfo, Split
import logging
import pandas as pd
from typing import Dict
CITATION = ""
_DESCRIPTION = "Demo"
_URL = ""
_HOMEPAGE = ""
_LICENSE = ""
_URL = "https://github.com/catherine-ywang/reddit_climate_comment_data/raw/main/climate_comments.csv.zip"
class NewDataset(GeneratorBasedBuilder):
def _info(self):
return DatasetInfo(
description=_DESCRIPTION,
features=Features({
"id": Value("string"),
"post_title": Value("string"),
"post_author": Value("string"),
"post_body": Value("string"),
"post_url": Value("string"),
"post_pic": Value("string"),
"subreddit": Value("string"),
"post_timestamp": Value("string"),
"post_upvotes": Value("int32"),
"post_permalink": Value("string"),
"comments": Sequence({
"CommentID": Value("string"),
"CommentAuthor": Value("string"),
"CommentBody": Value("string"),
"CommentTimestamp": Value("string"),
"CommentUpvotes": Value("int32"),
"CommentPermalink": Value("string"),
"replies": Sequence({
"ReplyID": Value("string"),
"ReplyAuthor": Value("string"),
"ReplyBody": Value("string"),
"ReplyTimestamp": Value("string"),
"ReplyUpvotes": Value("int32"),
"ReplyPermalink": Value("string"),
})
})
}),
homepage=_HOMEPAGE,
)
def _split_generators(self, dl_manager):
path = dl_manager.download_and_extract(_URL)
train_splits = SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": path+"/climate_comments.csv"})
return [train_splits]
def _generate_examples(self, filepath):
df = pd.read_csv(filepath)
# Drop duplicate comments
df = df.drop_duplicates(subset=['CommentID', 'CommentBody'])
# Group the DataFrame by post ID
grouped_df = df.groupby('PostID')
for post_id, group in grouped_df:
post_data = group.iloc[0] # Get the data for the post
post_title = post_data['PostTitle']
post_author = post_data['PostAuthor']
post_body = post_data['PostBody']
post_url = post_data['PostUrl']
post_pic = post_data['PostPic']
subreddit = post_data['Subreddit']
post_timestamp = post_data['PostTimestamp']
post_upvotes = post_data['PostUpvotes']
post_permalink = post_data['PostPermalink']
comments = []
# Extract unique comments
unique_comment_ids = group['CommentID'].unique()
for comment_id in unique_comment_ids:
comment_row = group[group['CommentID'] == comment_id].iloc[0]
comment = {
"CommentID": comment_id,
"CommentAuthor": comment_row['CommentAuthor'],
"CommentBody": comment_row['CommentBody'],
"CommentTimestamp": comment_row['CommentTimestamp'],
"CommentUpvotes": comment_row['CommentUpvotes'],
"CommentPermalink": comment_row['CommentPermalink'],
"replies": [] # Initialize empty list for replies
}
# Check if there are replies for the current comment
replies_data = df[df['CommentID'] == comment_id][['ReplyID', 'ReplyAuthor', 'ReplyBody', 'ReplyTimestamp', 'ReplyUpvotes', 'ReplyPermalink']]
for _, reply_data in replies_data.iterrows():
reply = {
"ReplyID": str(reply_data['ReplyID']),
"ReplyAuthor": reply_data['ReplyAuthor'],
"ReplyBody": reply_data['ReplyBody'],
"ReplyTimestamp": reply_data['ReplyTimestamp'],
"ReplyUpvotes": reply_data['ReplyUpvotes'],
"ReplyPermalink": reply_data['ReplyPermalink']
}
comment["replies"].append(reply)
comments.append(comment)
yield post_id, {
"id": post_id,
"post_title": post_title,
"post_author": post_author,
"post_body": post_body,
"post_url": post_url,
"post_pic": post_pic,
"subreddit": subreddit,
"post_timestamp": post_timestamp,
"post_upvotes": post_upvotes,
"post_permalink": post_permalink,
"comments": comments
}