import os
import json

import librosa
import numpy as np
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordPieceTrainer


class MalayalamDatasetTokenizer:
    def __init__(self, transcription_dir, wav_dir, vocab_size=16000):
        """
        Initialize the tokenizer with directories for transcriptions and audio files

        :param transcription_dir: Path to folder containing text transcriptions
        :param wav_dir: Path to folder containing WAV audio files
        :param vocab_size: Size of the vocabulary for text tokenization
        """
        self.transcription_dir = transcription_dir
        self.wav_dir = wav_dir

        self.text_tokenizer = self._create_text_tokenizer(vocab_size)

        # Fixed settings for MFCC feature extraction (a configuration dict, not a trained model)
        self.audio_tokenizer = {
            "sample_rate": 16000,
            "n_mfcc": 13,
            "n_fft": 2048,
            "hop_length": 512
        }
    def _create_text_tokenizer(self, vocab_size):
        """
        Create a WordPiece tokenizer for Malayalam text
        """
        tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
        tokenizer.pre_tokenizer = Whitespace()

        special_tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]"]

        # Keep the trainer so train_text_tokenizer() can apply the requested
        # vocabulary size and special tokens
        self.trainer = WordPieceTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens
        )

        return tokenizer
    def _get_matched_files(self):
        """
        Find matching transcription and audio files

        :return: List of tuples (transcription_path, audio_path)
        """
        matched_files = []

        for trans_file in os.listdir(self.transcription_dir):
            base_name = os.path.splitext(trans_file)[0]

            wav_path = os.path.join(self.wav_dir, base_name + '.wav')
            trans_path = os.path.join(self.transcription_dir, trans_file)

            if os.path.exists(wav_path):
                matched_files.append((trans_path, wav_path))

        return matched_files
    def process_dataset(self):
        """
        Process entire dataset, tokenizing text and extracting audio features

        :return: Processed dataset with tokenized text and audio features
        """
        dataset = []
        matched_files = self._get_matched_files()

        for trans_path, wav_path in matched_files:
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            text_tokens = self.text_tokenizer.encode(transcription).ids

            audio_features = self._extract_audio_features(wav_path)

            dataset.append({
                'transcription': transcription,
                'text_tokens': text_tokens,
                'audio_features': audio_features,
                'audio_path': wav_path,
                'transcription_path': trans_path
            })

        return dataset
    def _extract_audio_features(self, audio_path):
        """
        Extract MFCC features from audio file

        :param audio_path: Path to WAV file
        :return: Extracted audio features
        """
        audio, sr = librosa.load(
            audio_path,
            sr=self.audio_tokenizer['sample_rate']
        )

        mfccs = librosa.feature.mfcc(
            y=audio,
            sr=sr,
            n_mfcc=self.audio_tokenizer['n_mfcc'],
            n_fft=self.audio_tokenizer['n_fft'],
            hop_length=self.audio_tokenizer['hop_length']
        )

        # Transpose so each row is one frame of n_mfcc coefficients, and
        # convert to plain lists so the result is JSON-serializable
        return mfccs.T.tolist()
    def train_text_tokenizer(self):
        """
        Train text tokenizer on all transcription files
        """
        transcriptions = []
        for trans_path, _ in self._get_matched_files():
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcriptions.append(f.read().strip())

        # Pass the trainer so the configured vocab_size and special tokens
        # are actually used during training
        self.text_tokenizer.train_from_iterator(transcriptions, trainer=self.trainer)
    def save_dataset(self, output_path):
        """
        Save processed dataset to JSON

        :param output_path: Path to save processed dataset
        """
        dataset = self.process_dataset()

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(dataset, f, ensure_ascii=False, indent=2)

        print(f"Saved dataset to {output_path}")
    def save_tokenizer(self, output_dir):
        """
        Save tokenizer configurations

        :param output_dir: Directory to save tokenizer files
        """
        os.makedirs(output_dir, exist_ok=True)

        # Stores the learned vocabulary only; use self.text_tokenizer.save()
        # instead if a fully reloadable tokenizer file is needed
        with open(os.path.join(output_dir, 'text_tokenizer.json'), 'w', encoding='utf-8') as f:
            json.dump({
                'vocab': self.text_tokenizer.get_vocab(),
                'model_type': 'WordPiece'
            }, f, ensure_ascii=False, indent=2)

        with open(os.path.join(output_dir, 'audio_tokenizer.json'), 'w') as f:
            json.dump(self.audio_tokenizer, f, indent=2)
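# Note on outputs: save_tokenizer() writes 'text_tokenizer.json' (the learned
# WordPiece vocabulary) and 'audio_tokenizer.json' (the MFCC settings) to the
# given directory, while save_dataset() writes a single JSON list with one
# entry per matched transcription/WAV pair.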
if __name__ == "__main__":

    tokenizer = MalayalamDatasetTokenizer(
        transcription_dir='transcription',
        wav_dir='wav'
    )

    tokenizer.train_text_tokenizer()
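    # Usage sketch: once trained, the tokenizer can encode any transcription
    # string directly, mirroring what process_dataset() does internally. The
    # sentence below is a placeholder, not part of the dataset.
    # sample_ids = tokenizer.text_tokenizer.encode("<Malayalam sentence>").ids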
    tokenizer.save_tokenizer('malayalam_tokenizer')
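    # Optional follow-up, a minimal sketch: the filename below is only an
    # example, not a path this class requires.
    # tokenizer.save_dataset('malayalam_dataset.json')
    # with open('malayalam_dataset.json', encoding='utf-8') as f:
    #     first = json.load(f)[0]
    # mfcc_frames = np.array(first['audio_features'])  # shape: (frames, n_mfcc)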