import os
from datasets import Dataset, DatasetDict, concatenate_datasets, load_dataset
from datasets.features import Audio
import pandas as pd
import numpy as np
from tqdm import tqdm

# Function to load your custom dataset
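# Expected layout under data_dir (an assumption based on the paths used below):
#   <data_dir>/wav/<name>.wav              audio clip
#   <data_dir>/transcription/<name>.txt    transcript with the same base filename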
def load_custom_dataset(data_dir):
    data = {
        "audio": [],
        "text": []
    }

    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')

    # Assumes each .wav file in 'wav' has a same-named .txt transcript in 'transcription'
    for wav_file in os.listdir(wav_dir):
        if wav_file.endswith('.wav'):
            txt_file = wav_file.replace('.wav', '.txt')
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)

            # Read the transcription text
            with open(txt_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            # Append to the dataset
            data["audio"].append(wav_path)
            data["text"].append(transcription)

    # Create a pandas dataframe
    df = pd.DataFrame(data)

    # Convert to a Hugging Face dataset
    dataset = Dataset.from_pandas(df)

    # Define the audio feature (for .wav files)
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))  # Adjust the sampling rate if needed

    return dataset

# Load your custom dataset
custom_train_dataset = load_custom_dataset("./")

# Load Common Voice test set (Malayalam)
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)

common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
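# Note: the custom train split stores transcripts in a "text" column while Common
# Voice uses "sentence"; prepare_dataset below reads "text", so the test split would
# need e.g. rename_column("sentence", "text") before the same preprocessing.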

# Combine them into a DatasetDict
dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test
})

# Now you have the `dataset_dict` with your custom train set and the Common Voice test set
print(dataset_dict)

from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")

from transformers import WhisperTokenizer

tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
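
# Quick illustrative round-trip (any string works; decoding with skip_special_tokens
# drops the task/language prefix tokens the tokenizer prepends):
ids = tokenizer("example transcript").input_ids
print(tokenizer.decode(ids, skip_special_tokens=True))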

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
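# Note: WhisperProcessor wraps the feature extractor and tokenizer loaded above, so
# either the combined processor or the two separate objects can be used downstream.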

print(dataset_dict['train'][0])

import gc  # for garbage collection

def prepare_dataset(batch):
    # Prepare input features for each audio file in the batch
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]

    # Extract features for each audio sample
    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        feature = feature_extractor(audio, sampling_rate=sr).input_features[0]
        feature = np.array(feature, dtype=np.float16)
        features.append(feature)

        # Free memory after each feature extraction
        del audio  # Remove reference to the audio array
        del sr
        gc.collect()  # Trigger garbage collection to free memory

    # Store features in batch
    batch["input_features"] = features

    # Encode target text to label ids, padded to the longest sequence in the batch
    # and truncated to the tokenizer's maximum length
    batch["labels"] = tokenizer(batch["text"], padding="longest", truncation=True).input_ids

    return batch
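
# Optional sanity check (illustrative; assumes the train split is non-empty):
# run prepare_dataset on a single example and inspect the resulting shapes.
sample = dataset_dict["train"].select(range(1)).map(
    prepare_dataset,
    batched=True,
    batch_size=1,
    remove_columns=dataset_dict["train"].column_names,
)
print(np.array(sample["input_features"][0]).shape)  # log-Mel features, (80, 3000) for whisper-small
print(len(sample["labels"][0]))                     # number of label token ids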

# Function to process and save dataset in batches
def process_and_save_in_batches(dataset, batch_size=1000, save_path="processed_dataset"):
    # Create an empty list to store the processed batches
    all_processed = []

    # Loop through the dataset in chunks
    for start_idx in range(0, len(dataset), batch_size):
        # Get the batch slice
        batch = dataset[start_idx:start_idx+batch_size]
        batch = Dataset.from_dict(batch)
        # Apply the processing function to the batch
        processed_batch = batch.map(
            prepare_dataset,
            remove_columns=dataset.column_names,
            batched=True,
            batch_size=batch_size,
            num_proc=None,
        )
        print(f"Batch {start_idx} done")
        # Append the processed batch to the list
        all_processed.append(processed_batch)
        
        # Clear memory after processing each batch
        del batch  # Remove reference to the batch
        gc.collect()  # Trigger garbage collection

        # Save each processed batch to disk
        processed_batch.save_to_disk(os.path.join(save_path, f"batch_{start_idx // batch_size}"))
        del processed_batch  # Free memory after saving the batch
        gc.collect()

    # Merge all processed batches and save the combined dataset as well
    # (memory-heavy for large datasets; the per-batch saves above may already suffice)
    final_dataset = concatenate_datasets(all_processed)
    final_dataset.save_to_disk(save_path)

# Process and save the dataset in batches
process_and_save_in_batches(dataset_dict['train'], batch_size=1000, save_path="processed_dataset")
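
# A minimal sketch of how the saved batches could be reloaded later. This helper is
# not part of the original pipeline; it assumes the only "batch_*" entries under
# save_path are the directories written by process_and_save_in_batches.
from datasets import load_from_disk

def load_processed_batches(save_path="processed_dataset"):
    # Order the per-batch directories by numeric suffix so batch_10 sorts after batch_2
    batch_dirs = sorted(
        (d for d in os.listdir(save_path) if d.startswith("batch_")),
        key=lambda name: int(name.split("_")[1]),
    )
    batches = [load_from_disk(os.path.join(save_path, d)) for d in batch_dirs]
    return concatenate_datasets(batches)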