aoxo committed on
Commit
39e96bc
1 Parent(s): d532f51

Upload char_tokenizer.py with huggingface_hub

Files changed (1)
  1. char_tokenizer.py +224 -0
char_tokenizer.py ADDED
@@ -0,0 +1,224 @@
import os
import json
import librosa
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing
from tokenizers.trainers import WordPieceTrainer


class MalayalamCharacterTokenizer:
    def __init__(self, transcription_dir, wav_dir):
        """
        Initialize character-level tokenizer with directories for transcriptions and audio files

        :param transcription_dir: Path to folder containing text transcriptions
        :param wav_dir: Path to folder containing WAV audio files
        """
        self.transcription_dir = transcription_dir
        self.wav_dir = wav_dir

        # Define special tokens
        self.special_tokens = [
            "[PAD]",
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[MASK]"
        ]

        # Initialize text tokenizer
        self.text_tokenizer, self.trainer = self._create_character_tokenizer()

        # Audio tokenization parameters
        self.audio_tokenizer = {
            "sample_rate": 16000,  # Standard for speech models
            "n_mfcc": 13,          # Number of MFCCs to extract
            "n_fft": 2048,         # FFT window size
            "hop_length": 512      # Hop length between frames
        }

    def _create_character_tokenizer(self):
        """
        Create a character-level tokenizer for Malayalam text
        """
        # Initialize tokenizer with WordPiece model (we'll treat each character as a token)
        tokenizer = Tokenizer(WordPiece(unk_token="[UNK]"))

        # Use whitespace as pre-tokenizer
        tokenizer.pre_tokenizer = Whitespace()

        # Create trainer for character-level tokenization
        trainer = WordPieceTrainer(
            vocab_size=10000,  # Large enough to capture all characters
            special_tokens=self.special_tokens,
            continuing_subword_prefix='##',  # Not used for character-level tokens, but required by WordPiece
            show_progress=True
        )

        # Prepare special tokens with IDs for post-processing.
        # Before training, token_to_id() returns None, so each ID falls back to the
        # token's position in self.special_tokens; the trainer later assigns the same
        # IDs because special tokens are added to the vocabulary first.
        special_tokens_dict = {
            token: tokenizer.token_to_id(token) if tokenizer.token_to_id(token) is not None
            else len(tokenizer.get_vocab()) + list(self.special_tokens).index(token)
            for token in ["[CLS]", "[SEP]"]
        }

        # Add special token processing
        tokenizer.post_processor = TemplateProcessing(
            single="[CLS] $A [SEP]",
            pair="[CLS] $A [SEP] $B:1 [SEP]:1",
            special_tokens=[
                ("[CLS]", special_tokens_dict["[CLS]"]),
                ("[SEP]", special_tokens_dict["[SEP]"])
            ]
        )

        return tokenizer, trainer

    def _get_matched_files(self):
        """
        Find matching transcription and audio files

        :return: List of tuples (transcription_path, audio_path)
        """
        matched_files = []

        # Get all transcription files
        for trans_file in os.listdir(self.transcription_dir):
            # Remove extension to match with audio file
            base_name = os.path.splitext(trans_file)[0]

            # Check for corresponding WAV file
            wav_path = os.path.join(self.wav_dir, base_name + '.wav')
            trans_path = os.path.join(self.transcription_dir, trans_file)

            if os.path.exists(wav_path):
                matched_files.append((trans_path, wav_path))

        return matched_files

    def train_character_tokenizer(self):
        """
        Train character-level tokenizer on all transcription files

        :return: Trained tokenizer
        """
        # Collect all transcriptions
        transcriptions = []
        for trans_path, _ in self._get_matched_files():
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcriptions.append(f.read().strip())

        # Train the tokenizer on the transcriptions;
        # this effectively creates a character-level vocabulary
        self.text_tokenizer.train_from_iterator(transcriptions, self.trainer)

        return self.text_tokenizer

    def process_dataset(self, tokenizer):
        """
        Process entire dataset, tokenizing text and extracting audio features

        :param tokenizer: Trained tokenizer
        :return: Processed dataset with tokenized text and audio features
        """
        dataset = []
        matched_files = self._get_matched_files()

        for trans_path, wav_path in matched_files:
            # Read transcription
            with open(trans_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()

            # Tokenize text (character-level)
            text_tokens = tokenizer.encode(transcription).ids

            # Extract audio features
            audio_features = self._extract_audio_features(wav_path)

            dataset.append({
                'transcription': transcription,
                'text_tokens': text_tokens,
                'audio_features': audio_features,
                'audio_path': wav_path,
                'transcription_path': trans_path
            })

        return dataset

    def _extract_audio_features(self, audio_path):
        """
        Extract MFCC features from audio file

        :param audio_path: Path to WAV file
        :return: Extracted audio features
        """
        # Load audio file
        audio, sr = librosa.load(
            audio_path,
            sr=self.audio_tokenizer['sample_rate']
        )

        # Extract MFCCs
        mfccs = librosa.feature.mfcc(
            y=audio,
            sr=sr,
            n_mfcc=self.audio_tokenizer['n_mfcc'],
            n_fft=self.audio_tokenizer['n_fft'],
            hop_length=self.audio_tokenizer['hop_length']
        )

        return mfccs.T.tolist()

    def save_dataset(self, output_path, tokenizer):
        """
        Save processed dataset to JSON

        :param output_path: Path to save processed dataset
        :param tokenizer: Trained tokenizer
        """
        dataset = self.process_dataset(tokenizer)

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(dataset, f, ensure_ascii=False, indent=2)

        print(f"Saved dataset to {output_path}")

    def save_tokenizer(self, output_dir, tokenizer):
        """
        Save tokenizer configurations

        :param output_dir: Directory to save tokenizer files
        :param tokenizer: Trained tokenizer
        """
        os.makedirs(output_dir, exist_ok=True)

        # Save text tokenizer vocabulary and configuration
        tokenizer.save(os.path.join(output_dir, 'malayalam_character_tokenizer.json'))

        # Save audio tokenizer configuration
        with open(os.path.join(output_dir, 'audio_tokenizer.json'), 'w') as f:
            json.dump(self.audio_tokenizer, f, indent=2)


# Example usage
if __name__ == "__main__":
    # Initialize character-level tokenizer
    tokenizer_manager = MalayalamCharacterTokenizer(
        transcription_dir='transcription',
        wav_dir='wav'
    )

    # Train character tokenizer
    trained_tokenizer = tokenizer_manager.train_character_tokenizer()

    # Save dataset
    # tokenizer_manager.save_dataset(
    #     'malayalam_character_dataset.json',
    #     trained_tokenizer
    # )

    # Save tokenizer configurations
    tokenizer_manager.save_tokenizer(
        'malayalam_character_tokenizer',
        trained_tokenizer
    )
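
For reference, a minimal sketch of how the saved artifacts could be reloaded downstream, assuming the output directory used in the example above ('malayalam_character_tokenizer') and the standard tokenizers API; the Malayalam sample sentence is only illustrative:

import json
from tokenizers import Tokenizer

# Reload the trained text tokenizer written by save_tokenizer()
tokenizer = Tokenizer.from_file(
    "malayalam_character_tokenizer/malayalam_character_tokenizer.json"
)

# Reload the audio feature-extraction settings
with open("malayalam_character_tokenizer/audio_tokenizer.json", "r") as f:
    audio_config = json.load(f)

# Encode a sample transcription; the post-processor adds [CLS]/[SEP] automatically
encoding = tokenizer.encode("ഒരു ഉദാഹരണ വാക്യം")  # hypothetical sample text
print(encoding.tokens)
print(encoding.ids)
print(audio_config["sample_rate"])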