# asr_malayalam / tokenizer_construct.py
import os
import json
import librosa
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import WordPieceTrainer
class MalayalamDatasetTokenizer:
def __init__(self, transcription_dir, wav_dir, vocab_size=16000):
"""
Initialize tokenizer with directories for transcriptions and audio files
:param transcription_dir: Path to folder containing text transcriptions
:param wav_dir: Path to folder containing WAV audio files
:param vocab_size: Size of the vocabulary for text tokenization
"""
self.transcription_dir = transcription_dir
self.wav_dir = wav_dir
# Initialize text tokenizer
self.text_tokenizer = self._create_text_tokenizer(vocab_size)
# Audio tokenization parameters
self.audio_tokenizer = {
"sample_rate": 16000, # Standard for speech models
"n_mfcc": 13, # Number of MFCCs to extract
"n_fft": 2048, # FFT window size
"hop_length": 512 # Hop length between frames
}
def _create_text_tokenizer(self, vocab_size):
"""
Create a WordPiece tokenizer for Malayalam text
"""
tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()
special_tokens = ["[PAD]", "[UNK]", "[CLS]", "[SEP]"]
        # Keep the trainer on the instance so train_text_tokenizer() can reuse
        # the vocab size and special tokens configured here.
        self.text_trainer = WordPieceTrainer(
            vocab_size=vocab_size,
            special_tokens=special_tokens
        )
return tokenizer
def _get_matched_files(self):
"""
Find matching transcription and audio files
:return: List of tuples (transcription_path, audio_path)
"""
matched_files = []
# Get all transcription files
for trans_file in os.listdir(self.transcription_dir):
# Remove extension to match with audio file
base_name = os.path.splitext(trans_file)[0]
# Check for corresponding WAV file
wav_path = os.path.join(self.wav_dir, base_name + '.wav')
trans_path = os.path.join(self.transcription_dir, trans_file)
if os.path.exists(wav_path):
matched_files.append((trans_path, wav_path))
return matched_files
def process_dataset(self):
"""
Process entire dataset, tokenizing text and extracting audio features
:return: Processed dataset with tokenized text and audio features
"""
dataset = []
matched_files = self._get_matched_files()
for trans_path, wav_path in matched_files:
# Read transcription
with open(trans_path, 'r', encoding='utf-8') as f:
transcription = f.read().strip()
# Tokenize text
text_tokens = self.text_tokenizer.encode(transcription).ids
# Extract audio features
audio_features = self._extract_audio_features(wav_path)
dataset.append({
'transcription': transcription,
'text_tokens': text_tokens,
'audio_features': audio_features,
'audio_path': wav_path,
'transcription_path': trans_path
})
return dataset
def _extract_audio_features(self, audio_path):
"""
Extract MFCC features from audio file
:param audio_path: Path to WAV file
:return: Extracted audio features
"""
# Load audio file
audio, sr = librosa.load(
audio_path,
sr=self.audio_tokenizer['sample_rate']
)
# Extract MFCCs
mfccs = librosa.feature.mfcc(
y=audio,
sr=sr,
n_mfcc=self.audio_tokenizer['n_mfcc'],
n_fft=self.audio_tokenizer['n_fft'],
hop_length=self.audio_tokenizer['hop_length']
)
return mfccs.T.tolist()
def train_text_tokenizer(self):
"""
Train text tokenizer on all transcription files
"""
# Collect all transcriptions
transcriptions = []
for trans_path, _ in self._get_matched_files():
with open(trans_path, 'r', encoding='utf-8') as f:
transcriptions.append(f.read().strip())
        # Train tokenizer with the WordPiece trainer configured in __init__
        self.text_tokenizer.train_from_iterator(transcriptions, trainer=self.text_trainer)
def save_dataset(self, output_path):
"""
Save processed dataset to JSON
:param output_path: Path to save processed dataset
"""
dataset = self.process_dataset()
with open(output_path, 'w', encoding='utf-8') as f:
json.dump(dataset, f, ensure_ascii=False, indent=2)
print(f"Saved dataset to {output_path}")
def save_tokenizer(self, output_dir):
"""
Save tokenizer configurations
:param output_dir: Directory to save tokenizer files
"""
os.makedirs(output_dir, exist_ok=True)
        # Save the text tokenizer vocabulary (a custom JSON summary, not the native tokenizers save format)
with open(os.path.join(output_dir, 'text_tokenizer.json'), 'w', encoding='utf-8') as f:
json.dump({
'vocab': self.text_tokenizer.get_vocab(),
'model_type': 'WordPiece'
}, f, ensure_ascii=False, indent=2)
# Save audio tokenizer configuration
with open(os.path.join(output_dir, 'audio_tokenizer.json'), 'w') as f:
json.dump(self.audio_tokenizer, f, indent=2)
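# A minimal sketch (not part of the original script) showing how the JSON files
# written by save_tokenizer() could be read back. The helper name and its return
# values are illustrative assumptions, not an established API.
def load_tokenizer_configs(tokenizer_dir):
    """Load the vocab dict and audio settings saved by save_tokenizer()."""
    with open(os.path.join(tokenizer_dir, 'text_tokenizer.json'), 'r', encoding='utf-8') as f:
        text_config = json.load(f)   # {'vocab': {...}, 'model_type': 'WordPiece'}
    with open(os.path.join(tokenizer_dir, 'audio_tokenizer.json'), 'r') as f:
        audio_config = json.load(f)  # sample_rate, n_mfcc, n_fft, hop_length
    return text_config, audio_config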
# Example usage
if __name__ == "__main__":
# Initialize tokenizer
tokenizer = MalayalamDatasetTokenizer(
transcription_dir='transcription',
wav_dir='wav'
)
# Train text tokenizer
tokenizer.train_text_tokenizer()
# Process and save dataset
# tokenizer.save_dataset('malayalam_dataset.json')
# Save tokenizer configurations
tokenizer.save_tokenizer('malayalam_tokenizer')
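    # Illustrative only: once the tokenizer is trained, a single transcription
    # can be encoded like this (the sample text is a hypothetical placeholder).
    # sample_text = "ഒരു ഉദാഹരണ വാക്യം"
    # print(tokenizer.text_tokenizer.encode(sample_text).tokens)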