import os

import pandas as pd
from datasets import Dataset, DatasetDict, load_dataset
from datasets.features import Audio


def load_custom_dataset(data_dir):
    data = {
        "audio": [],
        "text": []
    }

    wav_dir = os.path.join(data_dir, 'wav')
    txt_dir = os.path.join(data_dir, 'transcription')

    # Pair each .wav file with the transcription file of the same base name.
    for wav_file in os.listdir(wav_dir):
        if wav_file.endswith('.wav'):
            txt_file = wav_file.replace('.wav', '.txt')
            wav_path = os.path.join(wav_dir, wav_file)
            txt_path = os.path.join(txt_dir, txt_file)

            with open(txt_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            data["audio"].append(wav_path)
            data["text"].append(transcription)

    df = pd.DataFrame(data)
    dataset = Dataset.from_pandas(df)

    # Decode audio lazily and resample to 16 kHz, the rate Whisper expects.
    dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))

    return dataset
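

# Expected on-disk layout for load_custom_dataset (illustrative; inferred from
# the path handling above, so adjust the names to match your corpus):
#
#   <data_dir>/
#       wav/             clip001.wav, clip002.wav, ...
#       transcription/   clip001.txt, clip002.txt, ...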

custom_train_dataset = load_custom_dataset("./")

common_voice_test = load_dataset("mozilla-foundation/common_voice_11_0", "ml", split="test", trust_remote_code=True)
common_voice_test = common_voice_test.select_columns(["audio", "sentence"])

# Common Voice stores transcriptions under "sentence"; rename it to "text" so
# both splits expose the column name that prepare_dataset reads.
common_voice_test = common_voice_test.rename_column("sentence", "text")

dataset_dict = DatasetDict({
    "train": custom_train_dataset,
    "test": common_voice_test
})

print(dataset_dict)

from transformers import WhisperFeatureExtractor, WhisperTokenizer, WhisperProcessor

feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")

tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

print(dataset_dict['train'][0])
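
# Optional sanity check (illustrative): encode one transcription and decode it
# back; the two printed lines should match if the tokenizer represents
# Malayalam text losslessly.
sample_text = dataset_dict["train"][0]["text"]
sample_ids = tokenizer(sample_text).input_ids
print(sample_text)
print(tokenizer.decode(sample_ids, skip_special_tokens=True))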

import gc


def prepare_dataset(batch):
    # Decode each audio example into its raw array and sampling rate.
    audio_arrays = [item["array"] for item in batch["audio"]]
    sampling_rates = [item["sampling_rate"] for item in batch["audio"]]

    # Convert each waveform to log-Mel input features one example at a time
    # to keep peak memory low.
    features = []
    for audio, sr in zip(audio_arrays, sampling_rates):
        feature = feature_extractor(audio, sampling_rate=sr).input_features[0]
        features.append(feature)

    # Release the temporary references and run a collection pass between
    # batches to curb memory growth from decoded audio.
    del audio_arrays, sampling_rates
    gc.collect()

    batch["input_features"] = features

    # Tokenize the transcriptions without padding; padding the labels (and
    # masking pad positions to -100) is normally deferred to the data collator
    # at training time.
    batch["labels"] = tokenizer(batch["text"], truncation=True).input_ids

    return batch


# Preprocess both splits in batches of 8, dropping the raw columns once the
# model inputs and labels have been computed.
dataset_dict = dataset_dict.map(
    prepare_dataset,
    remove_columns=dataset_dict.column_names["train"],
    batch_size=8,
    batched=True,
)
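
# Optional shape check (illustrative; assumes whisper-small's defaults of 80
# mel bins over 3000 frames, i.e. 30 s of 16 kHz audio).
example_features = dataset_dict["train"][0]["input_features"]
print(len(example_features), len(example_features[0]))  # expect: 80 3000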

dataset_dict.save_to_disk("processed_dataset")
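
# Optional verification (illustrative): reload from disk to confirm the save
# round-trips.
from datasets import load_from_disk

reloaded = load_from_disk("processed_dataset")
print(reloaded)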