import os
from datasets import load_from_disk, DatasetDict, concatenate_datasets

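# Discover the processed batch directories and sort them numerically by batch index.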
batch_dirs = [d for d in os.listdir("processed_dataset") if d.startswith("batch_")]
batch_dirs.sort(key=lambda x: int(x.split('_')[1]))

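# Load each processed batch from disk and merge them into a single dataset.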
processed_batches = []
for batch_dir in batch_dirs:
    batch_path = os.path.join("processed_dataset", batch_dir)
    batch_dataset = load_from_disk(batch_path)
    processed_batches.append(batch_dataset)

full_dataset = concatenate_datasets(processed_batches)

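# Shuffle with a fixed seed so the train/test split below is reproducible.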
shuffled_dataset = full_dataset.shuffle(seed=42)

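# Hold out a fixed number of examples for evaluation; the remainder becomes the training split.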
test_size = 975
processed_test = shuffled_dataset.select(range(test_size))
processed_train = shuffled_dataset.select(range(test_size, len(shuffled_dataset)))

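# Package the two splits into a DatasetDict for the Trainer.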
dataset_dict = DatasetDict({
    "train": processed_train,
    "test": processed_test
})

print("\nDataset split information:")
print(f"Total examples: {len(shuffled_dataset)}")
print(f"Training examples: {len(dataset_dict['train'])}")
print(f"Test examples: {len(dataset_dict['test'])}")

print("\nFirst training example structure:")
print(dataset_dict['train'][0].keys())
print("\nFirst test example structure:")
print(dataset_dict['test'][0].keys())

from transformers import WhisperForConditionalGeneration

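# Load the pretrained checkpoint and set generation defaults for Malayalam transcription;
# the legacy forced_decoder_ids are cleared so they do not override the language/task set here.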
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")

model.generation_config.language = "malayalam"
model.generation_config.task = "transcribe"
model.generation_config.forced_decoder_ids = None

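# The processor bundles the feature extractor (audio -> log-mel features) and the tokenizer.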
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")

import torch
from dataclasses import dataclass
from typing import Any, Dict, List, Union

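# Data collator: pads audio features and token labels separately into batched tensors.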
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any
    decoder_start_token_id: int
    max_target_length: int = 448

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Pad the log-mel input features into a single batch tensor.
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        # Pad the tokenized transcriptions with the tokenizer.
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        # Truncate any label sequence longer than the model's maximum target length.
        labels = labels_batch["input_ids"]
        if labels.shape[1] > self.max_target_length:
            labels = labels[:, :self.max_target_length]

        # Replace padding token ids with -100 so they are ignored by the loss.
        labels = labels.masked_fill(labels_batch.attention_mask[:, :labels.shape[1]].ne(1), -100)

        # If the decoder start token was prepended during tokenization, drop it here;
        # the model prepends it again when shifting the labels right during training.
        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels

        return batch

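# Instantiate the collator with the model's decoder start token id.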
data_collator = DataCollatorSpeechSeq2SeqWithPadding(
    processor=processor,
    decoder_start_token_id=model.config.decoder_start_token_id,
    max_target_length=448
)

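# Word error rate (WER) is the evaluation metric.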
import evaluate

metric = evaluate.load("wer")

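# Decode predictions and references back to text and report WER as a percentage.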
def compute_metrics(pred):
    pred_ids = pred.predictions
    label_ids = pred.label_ids

    # Restore the pad token where labels were masked with -100 so they can be decoded.
    label_ids[label_ids == -100] = processor.tokenizer.pad_token_id

    pred_str = processor.tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
    label_str = processor.tokenizer.batch_decode(label_ids, skip_special_tokens=True)

    wer = 100 * metric.compute(predictions=pred_str, references=label_str)

    return {"wer": wer}

from transformers import Seq2SeqTrainingArguments

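# Train for 4000 steps in fp16 with gradient checkpointing, evaluating on WER every 20 steps
# and restoring the best (lowest-WER) checkpoint at the end.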
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-small-mal",
    per_device_train_batch_size=16,
    gradient_accumulation_steps=1,
    learning_rate=1e-5,
    warmup_steps=500,
    max_steps=4000,
    gradient_checkpointing=True,
    fp16=True,
    fp16_full_eval=True,
    half_precision_backend='auto',
    evaluation_strategy="steps",
    per_device_eval_batch_size=8,
    predict_with_generate=True,
    generation_max_length=225,
    save_steps=1000,
    eval_steps=20,
    logging_steps=25,
    report_to=["tensorboard"],
    load_best_model_at_end=True,
    metric_for_best_model="wer",
    greater_is_better=False,
    push_to_hub=True,
)

from transformers import Seq2SeqTrainer

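# The trainer ties together the model, data splits, collator, and WER metric.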
trainer = Seq2SeqTrainer(
    args=training_args,
    model=model,
    train_dataset=dataset_dict["train"],
    eval_dataset=dataset_dict["test"],
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    tokenizer=processor.feature_extractor,
)

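# Save the processor alongside the checkpoints so the fine-tuned model can be reloaded for inference.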
processor.save_pretrained(training_args.output_dir)

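# Launch fine-tuning; checkpoints and TensorBoard logs are written to the output directory.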
trainer.train()