Lauler committed on
Commit
51e0d66
1 Parent(s): cb4e874

file to create the train/val/test splits

Files changed (1)
  1. create_tar.py +91 -0
create_tar.py ADDED
@@ -0,0 +1,91 @@
+ import tarfile
+ import os
+ import shutil
+ import multiprocessing as mp
+ from functools import partial
+ from pathlib import Path
+ from tqdm import tqdm
+ import pandas as pd
+
+ # Load utterance metadata and construct absolute paths to the audio files
+ df = pd.read_parquet("df_train_v2.parquet")
+ df["filename_full"] = "/home/fatrek/data_network/faton/riksdagen_anforanden/data/rixvox_v2/" + df["filename"]
+ df = df.rename(columns={"sex": "gender"})
+
+ # Compute each speaker's total speech duration in hours per (speaker, party),
+ # keep one row per speaker, and sort by total hours
+ df["speaker_total_hours"] = df.groupby(["speaker", "party"])["duration"].transform("sum") / 3600
+ df_hours = df.groupby(["speaker", "party"]).first().sort_values("speaker_total_hours", ascending=False).reset_index()
+ df_hours = df_hours.sample(frac=1, random_state=1337)  # Shuffle the rows
+
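+ # Splits are assigned to whole (speaker, party) groups rather than individual speeches,
+ # so each speaker's audio ends up in a single split and no voice occurs in both train and test.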
+ # Mark speakers as train until the cumulative sum of speaker_total_hours reaches 98% of the total
+ df_hours["train"] = df_hours["speaker_total_hours"].cumsum() / df_hours["speaker_total_hours"].sum() < 0.98
+ # Mark the remaining speakers as valid until their cumulative sum reaches 1% of the total
+ df_hours["valid"] = False
+ df_hours.loc[~df_hours["train"], "valid"] = (
+     df_hours[~df_hours["train"]]["speaker_total_hours"].cumsum() / df_hours["speaker_total_hours"].sum() < 0.01
+ )
+ df_hours["test"] = ~df_hours["train"] & ~df_hours["valid"]  # The rest is test
+
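+ # Optional sanity check: every speaker group should land in exactly one split
+ # assert (df_hours[["train", "valid", "test"]].sum(axis=1) == 1).all()
+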
+ # Create splits: an inner merge on (speaker, party) recovers all utterances of the selected speakers
+ df_train = pd.merge(df, df_hours.loc[df_hours["train"], ["speaker", "party"]], on=["speaker", "party"], how="inner")
+ df_valid = pd.merge(df, df_hours.loc[df_hours["valid"], ["speaker", "party"]], on=["speaker", "party"], how="inner")
+ df_test = pd.merge(df, df_hours.loc[df_hours["test"], ["speaker", "party"]], on=["speaker", "party"], how="inner")
+
+
+ def split_creator(df, observations_per_shard, shard_name):
+     """Label each row with a shard name such as "train_0", "train_1", ...,
+     placing at most `observations_per_shard` rows in each shard."""
+     df["shard"] = range(0, len(df))
+     df["shard"] = df["shard"] // observations_per_shard
+     df["shard"] = shard_name + "_" + df["shard"].astype(str)
+     return df["shard"]
+
+
+ df_train["shard"] = split_creator(df_train, 6500, "train")
+ df_valid["shard"] = split_creator(df_valid, 6500, "dev")
+ df_test["shard"] = split_creator(df_test, 6500, "test")
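+
+ # With 6500 observations per shard, rows 0-6499 of df_train become "train_0",
+ # rows 6500-12999 become "train_1", and so on; the validation split is written as "dev" on disk.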
+
+ # Drop overly long transcripts (more than 160 words) from the training split
+ df_train["nr_words"] = df_train["text"].str.split().str.len()
+ df_train = df_train[df_train["nr_words"] <= 160].reset_index(drop=True)
+ df_train = df_train.drop(columns="nr_words")
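+
+ # Note: only the train split is length-filtered; dev and test keep utterances of all lengths.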
+
+
+ def create_tar(df, data_folder="/home/fatrek/data_network/faton/rixvox/data"):
+     """Write all audio files of one shard to <data_folder>/<split>/<shard>.tar.gz."""
+     shard_filename = df["shard"].reset_index(drop=True).values[0]
+     shard_filename = shard_filename + ".tar.gz"
+     split = df["shard"].reset_index(drop=True).str.extract(r"(.*)_")[0][0]  # train_0 -> train
+     os.makedirs(os.path.join(data_folder, split), exist_ok=True)
+
+     print(f"Creating tarfile: {os.path.join(data_folder, split, shard_filename)}")
+     with tarfile.open(os.path.join(data_folder, split, shard_filename), "w:gz") as tar:
+         for filename in df["filename_full"].values:
+             tar.add(Path(filename), arcname=Path(filename).relative_to(Path(filename).parent.parent), recursive=False)
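+
+     # The arcname keeps only each file's immediate parent directory inside the archive,
+     # instead of the full absolute path.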
+
+
+ # Group by shard and split each dataframe into a list of per-shard dataframes
+ groups = df_train.groupby("shard")
+ df_train_list = [groups.get_group(x) for x in groups.groups]
+ groups = df_valid.groupby("shard")
+ df_valid_list = [groups.get_group(x) for x in groups.groups]
+ groups = df_test.groupby("shard")
+ df_test_list = [groups.get_group(x) for x in groups.groups]
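+
+ # Equivalent one-liner (illustrative): df_train_list = [g for _, g in df_train.groupby("shard")]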
+
+
+ data_folder = "/home/fatrek/data_network/faton/RixVox/data"
+
+ # Sequential alternative:
+ # for shard in df_train_list:
+ #     create_tar(shard, data_folder)
+
+ # pool.map passes only the dataframe, so bind data_folder explicitly with partial;
+ # otherwise create_tar silently falls back on its (differently cased) default path
+ create_tar_shard = partial(create_tar, data_folder=data_folder)
+
+ with mp.Pool(16) as pool:
+     pool.map(create_tar_shard, df_train_list)
+
+ with mp.Pool(1) as pool:
+     pool.map(create_tar_shard, df_valid_list)
+     pool.map(create_tar_shard, df_test_list)
+
+
+ # Drop internal columns and write per-split metadata files
+ df_train = df_train.drop(columns=["shard", "filename_full", "file_size"])
+ df_valid = df_valid.drop(columns=["shard", "filename_full", "file_size"])
+ df_test = df_test.drop(columns=["shard", "filename_full", "file_size"])
+
+ df_train.to_parquet(os.path.join("data", "train_metadata.parquet"), index=False)
+ df_valid.to_parquet(os.path.join("data", "dev_metadata.parquet"), index=False)
+ df_test.to_parquet(os.path.join("data", "test_metadata.parquet"), index=False)
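
For reference, a shard produced by this script can be inspected with the standard library's tarfile module. A minimal sketch (the shard path below is illustrative):

import tarfile

# Open one compressed shard and list its contents
with tarfile.open("data/train/train_0.tar.gz", "r:gz") as tar:
    names = tar.getnames()
    print(f"{len(names)} files, e.g. {names[:3]}")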