Commit c9aad36 by Raivis Dejus
Parent(s): 084ac91

Adding wikipedia cleanup

Files changed:
- clean.sh +6 -0
- tools/wikipedia/GetWikipedia.ipynb +11 -2
- tools/wikipedia/GetWikipedia.py +12 -3
clean.sh ADDED
@@ -0,0 +1,6 @@
+# Remove strings in parentheses
+sed -i -e 's|([^)]*)||g' lv.txt
+sed -i -e 's|\[[^)]*\]||g' lv.txt
+
+# Delete lines with foreign characters
+sed -i -e '/[óéàðæíúłßáśęινςü]/d' lv.txt
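For a quick sanity check of what these sed expressions do, here is a rough Python equivalent, a minimal sketch only: the regexes mirror the committed sed patterns (including the '[^)]*' character class inside the bracket rule), while the clean_line helper and the sample sentence are hypothetical and not part of the commit.

import re

# Rough Python equivalents of the sed expressions in clean.sh (illustrative only).
PAREN_RE = re.compile(r'\([^)]*\)')            # 's|([^)]*)||g'    - strings in parentheses
BRACKET_RE = re.compile(r'\[[^)]*\]')          # 's|\[[^)]*\]||g'  - bracketed strings, as committed
FOREIGN_RE = re.compile('[óéàðæíúłßáśęινςü]')  # '/[...]/d'        - lines with these characters are dropped

def clean_line(line):
    """Apply the clean.sh steps to one line; returning None means 'delete the line'."""
    line = PAREN_RE.sub('', line)
    line = BRACKET_RE.sub('', line)
    if FOREIGN_RE.search(line):
        return None
    return line

# Hypothetical sample line for demonstration.
print(clean_line('Rīga (vācu: Riga) ir Latvijas galvaspilsēta [1].'))
# -> 'Rīga  ir Latvijas galvaspilsēta .'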
tools/wikipedia/GetWikipedia.ipynb CHANGED
@@ -27,13 +27,22 @@
 "from tqdm import tqdm\n",
 "\n",
 "DATE = \"20221120\"\n",
+"CUTOFF_SECTIONS = ['Atsauces un piezīmes', 'Atsauces', 'Ārējās saites', 'Literatūra', 'Skatīt arī',\n",
+"                   ' Atsauces un piezīmes', ' Atsauces', ' Ārējās saites', ' Literatūra', ' Skatīt arī']\n",
 "\n",
 "dataset = load_dataset('joelito/EU_Wikipedias', date=DATE, language=\"lv\", split='train')\n",
 "\n",
 "with open(f'wikipedia_{DATE}.txt', 'w') as file:\n",
-"    for entry in tqdm(dataset):\n",
+"    for entry in tqdm(dataset):\n",
+"        # Will cut off reference sections\n",
+"        cutoffs = [len(entry[\"text\"])]\n",
+"        for section in CUTOFF_SECTIONS:\n",
+"            if entry[\"text\"].find('\\n\\n' + section) != -1:\n",
+"                cutoffs.append(entry[\"text\"].find('\\n\\n' + section));\n",
+"        \n",
 "        file.write(f'{entry[\"title\"]}\\n\\n')\n",
-"        file.write(f'{entry[\"text\"]}\\n\\n\\n')\n",
+"        file.write(f'{entry[\"text\"][0:min(cutoffs)]}\\n\\n\\n')\n",
+"    \n",
 "    file.close()\n",
 "\n",
 "print('Done') "
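For readers who don't know Latvian: the CUTOFF_SECTIONS headings are the standard Latvian Wikipedia appendix sections, roughly 'Atsauces un piezīmes' = 'References and notes', 'Atsauces' = 'References', 'Ārējās saites' = 'External links', 'Literatūra' = 'Literature', and 'Skatīt arī' = 'See also'. Each heading appears twice, with and without a leading space, presumably to also catch headings that occur with a stray leading space in the extracted text.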
tools/wikipedia/GetWikipedia.py CHANGED
@@ -2,13 +2,22 @@ from datasets import load_dataset
 from tqdm import tqdm
 
 DATE = "20221120"
+CUTOFF_SECTIONS = ['Atsauces un piezīmes', 'Atsauces', 'Ārējās saites', 'Literatūra', 'Skatīt arī',
+                   ' Atsauces un piezīmes', ' Atsauces', ' Ārējās saites', ' Literatūra', ' Skatīt arī']
 
 dataset = load_dataset('joelito/EU_Wikipedias', date=DATE, language="lv", split='train')
 
 with open(f'wikipedia_{DATE}.txt', 'w') as file:
-    for entry in tqdm(dataset):
+    for entry in tqdm(dataset):
+        # Will cut off reference sections
+        cutoffs = [len(entry["text"])]
+        for section in CUTOFF_SECTIONS:
+            if entry["text"].find('\n\n' + section) != -1:
+                cutoffs.append(entry["text"].find('\n\n' + section));
+
         file.write(f'{entry["title"]}\n\n')
-        file.write(f'{entry["text"]}\n\n\n')
+        file.write(f'{entry["text"][0:min(cutoffs)]}\n\n\n')
+
     file.close()
 
-print('Done')
+print('Done')
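As a quick illustration of the cutoff logic added here, below is a minimal, self-contained sketch run on a made-up entry dict: the title and text are hypothetical, and the find() result is cached in a variable rather than computed twice, but the trimming itself follows the committed code.

# Minimal sketch of the reference-section cutoff, using a hypothetical sample entry
# in place of the real joelito/EU_Wikipedias dataset rows.
CUTOFF_SECTIONS = ['Atsauces un piezīmes', 'Atsauces', 'Ārējās saites', 'Literatūra', 'Skatīt arī',
                   ' Atsauces un piezīmes', ' Atsauces', ' Ārējās saites', ' Literatūra', ' Skatīt arī']

entry = {  # hypothetical sample with the same fields the script reads
    "title": "Rīga",
    "text": "Rīga ir Latvijas galvaspilsēta.\n\nAtsauces\n\nAtsauču saraksts šeit.",
}

# Keep everything before the earliest appendix heading, as in GetWikipedia.py
# (the find() result is stored in pos here instead of being called twice).
cutoffs = [len(entry["text"])]
for section in CUTOFF_SECTIONS:
    pos = entry["text"].find('\n\n' + section)
    if pos != -1:
        cutoffs.append(pos)

print(repr(entry["text"][0:min(cutoffs)]))
# -> 'Rīga ir Latvijas galvaspilsēta.'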