EchoShao8899
committed on
Commit
•
2e2ee32
1
Parent(s):
a336d7e
Update dataset.py
Browse files- dataset.py +24 -16
dataset.py
CHANGED
@@ -1,22 +1,23 @@
|
|
1 |
-
|
|
|
|
|
2 |
|
3 |
class FreshWikiDataset(GeneratorBasedBuilder):
|
4 |
def _info(self):
|
5 |
return DatasetInfo(
|
6 |
description="The FreshWiki Dataset is a collection of high-quality Wikipedia articles focusing on the most-edited pages from February 2022 to September 2023.",
|
7 |
-
features=
|
8 |
-
"json_file":
|
9 |
-
"txt_file":
|
10 |
-
"
|
11 |
}),
|
12 |
)
|
13 |
|
14 |
-
def _split_generators(self, dl_manager):
|
15 |
-
# Access the folder where all files are stored
|
16 |
data_dir = dl_manager.download_and_extract("https://huggingface.co/datasets/EchoShao8899/FreshWiki")
|
17 |
return [
|
18 |
-
|
19 |
-
name=
|
20 |
gen_kwargs={
|
21 |
"json_files": os.path.join(data_dir, "json"),
|
22 |
"txt_files": os.path.join(data_dir, "txt"),
|
@@ -26,13 +27,20 @@ class FreshWikiDataset(GeneratorBasedBuilder):
|
|
26 |
]
|
27 |
|
28 |
def _generate_examples(self, json_files, txt_files, topic_list_csv):
|
29 |
-
|
30 |
-
with open(topic_list_csv, "r") as csv_file:
|
31 |
reader = csv.DictReader(csv_file)
|
32 |
for idx, row in enumerate(reader):
|
33 |
topic_name = row['topic'].replace(' ', '_').replace('/', '_')
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import csv
|
3 |
+
from datasets import Dataset, DatasetInfo, Features, Value, Sequence, GeneratorBasedBuilder, DownloadManager, Split
|
4 |
|
5 |
class FreshWikiDataset(GeneratorBasedBuilder):
|
6 |
def _info(self):
    """Return the dataset metadata: a description plus a three-field schema.

    Every example exposes the raw JSON article dump, the raw text article,
    and the topic name — all as plain strings.
    """
    # Build the schema separately so the DatasetInfo call stays compact.
    feature_schema = Features({
        "json_file": Value("string"),
        "txt_file": Value("string"),
        "topic": Value("string"),
    })
    return DatasetInfo(
        description="The FreshWiki Dataset is a collection of high-quality Wikipedia articles focusing on the most-edited pages from February 2022 to September 2023.",
        features=feature_schema,
    )
|
15 |
|
16 |
+
def _split_generators(self, dl_manager: DownloadManager):
|
|
|
17 |
data_dir = dl_manager.download_and_extract("https://huggingface.co/datasets/EchoShao8899/FreshWiki")
|
18 |
return [
|
19 |
+
Split(
|
20 |
+
name=Split.TRAIN,
|
21 |
gen_kwargs={
|
22 |
"json_files": os.path.join(data_dir, "json"),
|
23 |
"txt_files": os.path.join(data_dir, "txt"),
|
|
|
27 |
]
|
28 |
|
29 |
def _generate_examples(self, json_files, txt_files, topic_list_csv):
|
30 |
+
with open(topic_list_csv, "r", encoding="utf-8") as csv_file:
|
|
|
31 |
reader = csv.DictReader(csv_file)
|
32 |
for idx, row in enumerate(reader):
|
33 |
topic_name = row['topic'].replace(' ', '_').replace('/', '_')
|
34 |
+
json_path = os.path.join(json_files, f'{topic_name}.json')
|
35 |
+
txt_path = os.path.join(txt_files, f'{topic_name}.txt')
|
36 |
+
|
37 |
+
if os.path.exists(json_path) and os.path.exists(txt_path):
|
38 |
+
with open(json_path, "r", encoding="utf-8") as json_file, \
|
39 |
+
open(txt_path, "r", encoding="utf-8") as txt_file:
|
40 |
+
yield idx, {
|
41 |
+
"json_file": json_file.read(),
|
42 |
+
"txt_file": txt_file.read(),
|
43 |
+
"topic": row['topic'],
|
44 |
+
}
|
45 |
+
else:
|
46 |
+
print(f"Warning: Files for topic '{row['topic']}' not found. Skipping.")
|