Upload 2 files
digital_green_process_data.py
ADDED
@@ -0,0 +1,62 @@
import os
import pandas as pd
from datasets import Dataset, DatasetDict, Audio
import soundfile as sf
import numpy as np
from sklearn.model_selection import train_test_split

# Paths
audio_folder = '/home/azureuser/data2/dg_16/'  # Folder where the audio files are stored
csv_file = 'digital_green_recordings.csv'      # CSV with audio paths and transcripts

# Read the CSV file (assumes '$'-separated columns 'path' and 'transcript')
df = pd.read_csv(csv_file, sep="$")

# Create a client_id column (synthetic speaker IDs, since no speaker info is available)
df['client_id'] = ['speaker_' + str(i) for i in range(len(df))]

# Resolve relative audio paths against the audio folder
df['path'] = df['path'].apply(lambda x: os.path.join(audio_folder, x))

# Additional columns used by the Common Voice format (optional)
df['up_votes'] = 0
df['down_votes'] = 0
df['age'] = None
df['gender'] = None
df['accent'] = None

# Load an audio file and convert it to mono if it is stereo
def load_audio(file_path):
    audio, sr = sf.read(file_path)
    if len(audio.shape) > 1:
        audio = np.mean(audio, axis=1)
    # Return the flat structure expected by the datasets Audio feature
    return {'array': audio, 'sampling_rate': sr}

# Apply the audio loading function to every row
df['audio'] = df['path'].apply(load_audio)

# Split into train and test sets (adjust test_size as needed)
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)

# Convert DataFrames to Hugging Face Datasets (drop the pandas index)
train_dataset = Dataset.from_pandas(train_df, preserve_index=False)
test_dataset = Dataset.from_pandas(test_df, preserve_index=False)

# Cast the 'audio' column to the Audio feature type
train_dataset = train_dataset.cast_column('audio', Audio())
test_dataset = test_dataset.cast_column('audio', Audio())

# Create a DatasetDict; add a 'validation' split here if one is available
dataset_dict = DatasetDict({
    'train': train_dataset,
    'test': test_dataset
})

# Save the dataset to disk for future use (optional)
dataset_dict.save_to_disk('data2/digital_green_data')

# Print a sample from each split
print(dataset_dict['train'][0])
print(dataset_dict['test'][0])
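Once saved with save_to_disk, the dataset can be reloaded in a later session without re-reading the audio files. A minimal sketch, assuming the script above has already been run and the output directory 'data2/digital_green_data' exists; the column names follow the CSV columns described in the script:

from datasets import load_from_disk

dataset_dict = load_from_disk('data2/digital_green_data')
print(dataset_dict)                      # shows the train/test splits and their sizes

sample = dataset_dict['train'][0]
print(sample['transcript'])              # transcript text taken from the CSV
print(sample['audio']['sampling_rate'])  # decoded audio: array plus sampling rate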
digital_green_recordings.csv
ADDED
The diff for this file is too large to render.
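Although the CSV itself is not rendered here, digital_green_process_data.py reads it with pd.read_csv(csv_file, sep="$") and expects at least 'path' and 'transcript' columns. A minimal sketch of that assumed layout; the file names and transcripts below are hypothetical placeholders, not rows from the actual file:

import pandas as pd

# Hypothetical rows illustrating the '$'-separated layout the processing script expects
example = pd.DataFrame({
    'path': ['clip_0001.wav', 'clip_0002.wav'],
    'transcript': ['first example transcript', 'second example transcript'],
})
example.to_csv('example_recordings.csv', sep='$', index=False)

# Reading it back mirrors the call in digital_green_process_data.py
print(pd.read_csv('example_recordings.csv', sep='$'))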