Upload reddit_climate_comment.py
reddit_climate_comment.py CHANGED
@@ -56,9 +56,6 @@ class NewDataset(GeneratorBasedBuilder):
     def _generate_examples(self, filepath):
         df = pd.read_csv(filepath)
 
-        # Remove duplicate comment IDs
-        df = df.drop_duplicates(subset=['CommentID'])
-
         # Group the DataFrame by post ID
         grouped_df = df.groupby('PostID')
 
@@ -95,7 +92,7 @@ class NewDataset(GeneratorBasedBuilder):
             }
 
             # Check if there are replies for the current comment
-            replies_data =
+            replies_data = df[df['CommentID'] == comment_id][['ReplyID', 'ReplyAuthor', 'ReplyBody', 'ReplyTimestamp', 'ReplyUpvotes', 'ReplyPermalink']]
             for _, reply_data in replies_data.iterrows():
                 reply = {
                     "ReplyID": str(reply_data['ReplyID']),
@@ -129,5 +126,6 @@ class NewDataset(GeneratorBasedBuilder):
 
 
 
+
 
 
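For context, the nesting pattern the changed lines rely on can be sketched as follows: read the flat CSV, group rows by PostID, and for each comment select its reply columns by filtering on CommentID. This is a minimal, assumed sketch rather than the actual loading script; the deduplication of CommentID inside each post, the row-per-reply layout of the CSV, and the yielded record shape (here reduced to PostID, CommentID, ReplyID) are illustrative assumptions, not taken from the diff.

import pandas as pd

def generate_examples_sketch(filepath):
    # Sketch only: mirrors the pandas pattern visible in the diff, not the real script.
    df = pd.read_csv(filepath)

    # Group the flat CSV by post ID so each yielded example is one post with its comments
    grouped_df = df.groupby('PostID')

    for post_id, post_rows in grouped_df:
        comments = []
        # Assuming one CSV row per (comment, reply) pair: take each comment ID once per post
        for comment_id in post_rows['CommentID'].dropna().unique():
            # Check if there are replies for the current comment by selecting its reply columns
            replies_data = df[df['CommentID'] == comment_id][
                ['ReplyID', 'ReplyAuthor', 'ReplyBody',
                 'ReplyTimestamp', 'ReplyUpvotes', 'ReplyPermalink']
            ]
            replies = []
            for _, reply_data in replies_data.iterrows():
                if pd.notna(reply_data['ReplyID']):
                    replies.append({"ReplyID": str(reply_data['ReplyID'])})
            comments.append({"CommentID": str(comment_id), "Replies": replies})
        yield str(post_id), {"PostID": str(post_id), "Comments": comments}

Read this way, the removed df.drop_duplicates(subset=['CommentID']) call would have kept only one row per comment before grouping, discarding the extra reply rows; selecting replies_data per comment from the full frame keeps every reply available. That is one plausible reading of the change; the commit message itself does not spell out the motivation.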