# asr_malayalam / whisper_processing.py
import gc  # garbage collection during feature extraction
import os

import pandas as pd
from datasets import Dataset, DatasetDict, load_dataset
from datasets.features import Audio
# Function to load your custom dataset
def load_custom_dataset(data_dir):
data = {
"audio": [],
"text": []
}
wav_dir = os.path.join(data_dir, 'wav')
txt_dir = os.path.join(data_dir, 'transcription')
    # Pair each .wav file with the same-named .txt file in transcription/
    # (sorted for a deterministic example order)
    for wav_file in sorted(os.listdir(wav_dir)):
if wav_file.endswith('.wav'):
txt_file = wav_file.replace('.wav', '.txt')
wav_path = os.path.join(wav_dir, wav_file)
txt_path = os.path.join(txt_dir, txt_file)
# Read the transcription text
with open(txt_path, 'r', encoding='utf-8') as f:
transcription = f.read().strip()
# Append to the dataset
data["audio"].append(wav_path)
data["text"].append(transcription)
# Create a pandas dataframe
df = pd.DataFrame(data)
# Convert to a Hugging Face dataset
dataset = Dataset.from_pandas(df)
    # Cast the audio column so the .wav files are decoded at 16 kHz, the sampling rate Whisper expects
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
return dataset
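# Expected on-disk layout for load_custom_dataset (an assumption based on the
# paths used above; the file names are illustrative):
#
#   data_dir/
#     wav/
#       utt_0001.wav
#       utt_0002.wav
#       ...
#     transcription/
#       utt_0001.txt
#       utt_0002.txt
#       ...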
# Load your custom dataset
custom_train_dataset = load_custom_dataset("./")
# Load the Common Voice test set (Malayalam)
common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])
# Match the custom dataset's schema: prepare_dataset below reads batch["text"],
# and Whisper's feature extractor requires 16 kHz audio (Common Voice ships 48 kHz)
common_voice_test = common_voice_test.rename_column("sentence", "text")
common_voice_test = common_voice_test.cast_column("audio", Audio(sampling_rate=16_000))
# Combine them into a DatasetDict
dataset_dict = DatasetDict({
"train": custom_train_dataset,
"test": common_voice_test
})
# Now you have the `dataset_dict` with your custom train set and the Common Voice test set
print(dataset_dict)
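# The print above should show something like the following (row counts depend
# on your data):
# DatasetDict({
#     train: Dataset({features: ['audio', 'text'], num_rows: <N>}),
#     test: Dataset({features: ['audio', 'text'], num_rows: <M>})
# })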
from transformers import WhisperFeatureExtractor, WhisperProcessor, WhisperTokenizer
feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
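# Quick sanity check (illustrative): the feature extractor turns raw audio into
# a log-Mel spectrogram padded/truncated to Whisper's 30-second window.
sample_audio = custom_train_dataset[0]["audio"]
sample_features = feature_extractor(
    sample_audio["array"], sampling_rate=sample_audio["sampling_rate"]
).input_features[0]
print(sample_features.shape)  # (80, 3000): 80 Mel bins x 3000 frames for whisper-small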
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
print(dataset_dict['train'][0])
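# Round-trip sanity check (illustrative): encoding a transcription and decoding
# it back should reproduce the original string; keeping special tokens shows
# the language and task markers the tokenizer prepends.
input_str = dataset_dict["train"][0]["text"]
labels = tokenizer(input_str).input_ids
decoded_with_special = tokenizer.decode(labels, skip_special_tokens=False)
decoded_str = tokenizer.decode(labels, skip_special_tokens=True)
print(f"Input:                 {input_str}")
print(f"Decoded w/ special:    {decoded_with_special}")
print(f"Decoded w/out special: {decoded_str}")
print(f"Are equal:             {input_str == decoded_str}")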
def prepare_dataset(batch):
    # Compute log-Mel input features for each audio sample in the batch
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]
    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        feature = feature_extractor(audio, sampling_rate=sr).input_features[0]
        features.append(feature)
    # Drop the decoded audio and collect garbage once per batch; collecting per
    # sample adds overhead without freeing extra memory
    del audio_arrays
    gc.collect()
    # Store the extracted features in the batch
    batch["input_features"] = features
    # Encode the target text to label ids, padding to the longest sequence in the batch
    batch["labels"] = tokenizer(batch["text"], padding="longest", truncation=True).input_ids
    return batch
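# Optional smoke test (illustrative, assumes the train split has at least two
# examples): run prepare_dataset on a tiny slice first to catch schema errors
# before mapping the full dataset.
probe = dataset_dict["train"].select(range(2)).map(
    prepare_dataset, batched=True, batch_size=2, remove_columns=["audio", "text"]
)
print(probe.column_names)  # expected: ['input_features', 'labels']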
# Apply the preprocessing to both splits with Dataset.map
dataset_dict = dataset_dict.map(
    prepare_dataset,
    remove_columns=dataset_dict.column_names["train"],  # both splits now share these columns
    batch_size=8,  # small batches to limit peak memory use
    batched=True,  # pass dicts of lists to prepare_dataset
)
dataset_dict.save_to_disk("processed_dataset")
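# The processed dataset can be reloaded later without re-running the
# preprocessing, e.g. in the training script:
# from datasets import load_from_disk
# dataset_dict = load_from_disk("processed_dataset")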