Datasets:
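The eight per-language WikiHow QA CSVs (2k Q&A pairs each: English, Russian, Portuguese, Dutch, Italian, French, Spanish, German) are merged into a single 16k-row dataset with the short pandas script below, which writes the result as both CSV and Parquet: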
import pandas as pd

n = 2        # each per-language CSV holds ~2k Q&A pairs
total_n = 16 # 8 languages x 2k rows in the merged dataset

# list of all the csv files
csv_files = [
    f"en-wikihow-qa-dataset-{n}k.csv",
    f"ru-wikihow-qa-dataset-{n}k.csv",
    f"pt-wikihow-qa-dataset-{n}k.csv",
    f"nl-wikihow-qa-dataset-{n}k.csv",
    f"it-wikihow-qa-dataset-{n}k.csv",
    f"fr-wikihow-qa-dataset-{n}k.csv",
    f"es-wikihow-qa-dataset-{n}k.csv",
    f"de-wikihow-qa-dataset-{n}k.csv"
]

# create an empty dataframe to hold the merged data
merged_data = pd.DataFrame()

# loop through all the csv files, read them into dataframes, and merge them
for file in csv_files:
    data = pd.read_csv(file)
    merged_data = pd.concat([merged_data, data], ignore_index=True)

# write the merged data to new csv and parquet files
merged_data.to_csv(f"multilingial-wikihow-qa-dataset-{total_n}k.csv", index=False)
merged_data.to_parquet(f"multilingial-wikihow-qa-dataset-{total_n}k.parquet", row_group_size=100, engine="pyarrow", index=False)
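As a quick sanity check (a minimal sketch, assuming the script above has already been run in the same directory and pyarrow is installed), the merged Parquet file can be read back to confirm the expected row count:

import pandas as pd

# Read the merged Parquet file written by the script above.
merged = pd.read_parquet("multilingial-wikihow-qa-dataset-16k.parquet", engine="pyarrow")

# With 8 source files of ~2k rows each, roughly 16,000 rows are expected.
print(len(merged))
print(merged.columns.tolist())  # column names carried over from the source CSVs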