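"""Hugging Face `datasets` loading script for a Reddit climate comments set.

Assumed layout (inferred from the grouping logic below): the source CSV is
flat, repeating post- and comment-level fields on each reply row; this script
regroups it into nested post -> comments -> replies examples.
"""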
import os

import pandas as pd
from datasets import (
    DatasetInfo,
    Features,
    GeneratorBasedBuilder,
    Sequence,
    Split,
    SplitGenerator,
    Value,
)
CITATION = ""
_DESCRIPTION = "Demo"
_URL = ""
_HOMEPAGE = ""
_LICENSE = ""
_URL = "https://github.com/catherine-ywang/reddit_climate_comment_data/raw/main/climate_comments.csv.zip"


class NewDataset(GeneratorBasedBuilder):
    """Reddit climate posts with nested comments and replies."""

    def _info(self):
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                "id": Value("string"),
                "post_title": Value("string"),
                "post_author": Value("string"),
                "post_body": Value("string"),
                "post_url": Value("string"),
                "post_pic": Value("string"),
                "subreddit": Value("string"),
                "post_timestamp": Value("string"),
                "post_upvotes": Value("int32"),
                "post_permalink": Value("string"),
                "comments": Sequence({
                    "CommentID": Value("string"),
                    "CommentAuthor": Value("string"),
                    "CommentBody": Value("string"),
                    "CommentTimestamp": Value("string"),
                    "CommentUpvotes": Value("int32"),
                    "CommentPermalink": Value("string"),
                    "replies": Sequence({
                        "ReplyID": Value("string"),
                        "ReplyAuthor": Value("string"),
                        "ReplyBody": Value("string"),
                        "ReplyTimestamp": Value("string"),
                        "ReplyUpvotes": Value("int32"),
                        "ReplyPermalink": Value("string"),
                    }),
                }),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
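    # Note: `Sequence` over a dict of features is presented as a dict of lists
    # when examples are read back (e.g. example["comments"]["CommentID"] is a
    # list), even though `_generate_examples` yields a list of dicts.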
    def _split_generators(self, dl_manager):
        # Download and extract the zipped CSV archive referenced by _URL.
        path = dl_manager.download_and_extract(_URL)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(path, "climate_comments.csv")},
            )
        ]
    def _generate_examples(self, filepath):
        df = pd.read_csv(filepath)
        # The CSV is flat (post and comment fields repeat on every reply row),
        # so drop only exact duplicate rows; deduplicating on CommentID and
        # CommentBody alone would also discard all but one reply per comment.
        df = df.drop_duplicates()
        # Group the DataFrame by post ID so each example is one post.
        grouped_df = df.groupby('PostID')
        for post_id, group in grouped_df:
            post_data = group.iloc[0]  # Get the data for the post
            post_title = post_data['PostTitle']
            post_author = post_data['PostAuthor']
            post_body = post_data['PostBody']
            post_url = post_data['PostUrl']
            post_pic = post_data['PostPic']
            subreddit = post_data['Subreddit']
            post_timestamp = post_data['PostTimestamp']
            post_upvotes = post_data['PostUpvotes']
            post_permalink = post_data['PostPermalink']
            comments = []
            # Build one entry per unique comment under this post.
            for comment_id in group['CommentID'].unique():
                comment_row = group[group['CommentID'] == comment_id].iloc[0]
                comment = {
                    "CommentID": comment_id,
                    "CommentAuthor": comment_row['CommentAuthor'],
                    "CommentBody": comment_row['CommentBody'],
                    "CommentTimestamp": comment_row['CommentTimestamp'],
                    "CommentUpvotes": int(comment_row['CommentUpvotes']),
                    "CommentPermalink": comment_row['CommentPermalink'],
                    "replies": [],  # Filled in below.
                }
                # Rows for comments without replies carry NaN reply fields, so
                # filter those out before building the reply records.
                replies_data = group[group['CommentID'] == comment_id][
                    ['ReplyID', 'ReplyAuthor', 'ReplyBody', 'ReplyTimestamp',
                     'ReplyUpvotes', 'ReplyPermalink']
                ].dropna(subset=['ReplyID'])
                for _, reply_data in replies_data.iterrows():
                    comment["replies"].append({
                        "ReplyID": str(reply_data['ReplyID']),
                        "ReplyAuthor": reply_data['ReplyAuthor'],
                        "ReplyBody": reply_data['ReplyBody'],
                        "ReplyTimestamp": reply_data['ReplyTimestamp'],
                        "ReplyUpvotes": int(reply_data['ReplyUpvotes']),
                        "ReplyPermalink": reply_data['ReplyPermalink'],
                    })
                comments.append(comment)
            yield post_id, {
                "id": str(post_id),
                "post_title": post_title,
                "post_author": post_author,
                "post_body": post_body,
                "post_url": post_url,
                "post_pic": post_pic,
                "subreddit": subreddit,
                "post_timestamp": post_timestamp,
                "post_upvotes": post_upvotes,
                "post_permalink": post_permalink,
                "comments": comments,
            }
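
# A minimal usage sketch (an assumption, not part of the dataset: it presumes
# this script is saved as `new_dataset.py` next to the caller; recent versions
# of `datasets` also require `trust_remote_code=True` for script-based sets):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("new_dataset.py", split="train", trust_remote_code=True)
#     example = ds[0]
#     print(example["post_title"], len(example["comments"]["CommentID"]))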