# whisper_trainer.py
import os
from datasets import load_from_disk, DatasetDict, concatenate_datasets
# Get all batch directories
batch_dirs = [d for d in os.listdir("processed_dataset") if d.startswith("batch_")]
batch_dirs.sort(key=lambda x: int(x.split('_')[1])) # Sort numerically
# Load each batch and combine them
processed_batches = []
for batch_dir in batch_dirs:
    batch_path = os.path.join("processed_dataset", batch_dir)
    batch_dataset = load_from_disk(batch_path)
    processed_batches.append(batch_dataset)
# Combine all batches into one dataset
full_dataset = concatenate_datasets(processed_batches)
# Split into train and test
# First shuffle the dataset with a fixed seed for reproducibility
shuffled_dataset = full_dataset.shuffle(seed=42)
# Hold out 975 samples from the shuffled data for the test split
test_size = 975
processed_test = shuffled_dataset.select(range(test_size))
processed_train = shuffled_dataset.select(range(test_size, len(shuffled_dataset)))
# Create the dataset_dict with the new split
dataset_dict = DatasetDict({
    "train": processed_train,
    "test": processed_test,
})
# Verify that loading and splitting were successful
print("\nDataset split information:")
print(f"Total examples: {len(shuffled_dataset)}")
print(f"Training examples: {len(dataset_dict['train'])}")
print(f"Test examples: {len(dataset_dict['test'])}")
# Optional: Print the first example from each split to verify the structure
print("\nFirst training example structure:")
print(dataset_dict['train'][0].keys())
print("\nFirst test example structure:")
print(dataset_dict['test'][0].keys())
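# Each example is expected to carry "input_features" (the log-Mel spectrogram) and
# "labels" (tokenized transcript ids) from the preprocessing step; these are the two
# keys consumed by the data collator defined below.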
from transformers import WhisperForConditionalGeneration
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
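# whisper-small is the ~244M-parameter multilingual checkpoint; the language and task
# set below steer its decoder prompt towards Malayalam transcription.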
model.generation_config.language = "malayalam"
model.generation_config.task = "transcribe"
model.generation_config.forced_decoder_ids = None
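# Note: clearing forced_decoder_ids avoids a conflict with the language/task set on
# generation_config above; Whisper builds its decoder prompt from generation_config
# at generation time.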
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
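# The processor bundles the feature extractor (audio -> log-Mel features) and the
# tokenizer (text <-> token ids); both are used separately in the collator below.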
import torch
from dataclasses import dataclass
from typing import Any, Dict, List, Union
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any
    decoder_start_token_id: int
    max_target_length: int = 448  # cap on label length

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Split inputs and labels since they have different lengths and need different padding methods.
        # First treat the audio inputs by simply returning torch tensors.
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        # Get the tokenized label sequences and pad them to the longest sequence in the batch.
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        # Truncate labels to max_target_length.
        labels = labels_batch["input_ids"]
        if labels.shape[1] > self.max_target_length:
            labels = labels[:, :self.max_target_length]

        # Replace padding with -100 so padded positions are ignored by the loss.
        labels = labels.masked_fill(labels_batch.attention_mask[:, :labels.shape[1]].ne(1), -100)

        # If a BOS token was prepended in the tokenization step, cut it here,
        # since it is appended again later anyway.
        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels
        return batch
# When initializing the data collator, you can now specify the max length
data_collator = DataCollatorSpeechSeq2SeqWithPadding(
    processor=processor,
    decoder_start_token_id=model.config.decoder_start_token_id,
    max_target_length=448,  # explicitly set the max label length
)
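# Optional sanity check (a quick sketch, assuming the preprocessed examples hold
# "input_features" and "labels" as described above):
# sample = data_collator([dataset_dict["train"][i] for i in range(2)])
# print(sample["input_features"].shape, sample["labels"].shape)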
import evaluate
metric = evaluate.load("wer")
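# WER (word error rate) compares predicted and reference transcripts word by word;
# lower is better, which is why greater_is_better=False is set in the training arguments.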
def compute_metrics(pred):
    pred_ids = pred.predictions
    label_ids = pred.label_ids

    # Replace -100 with the pad token id so the labels can be decoded.
    label_ids[label_ids == -100] = processor.tokenizer.pad_token_id

    # Decode without grouping tokens so WER is computed on plain strings.
    pred_str = processor.tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
    label_str = processor.tokenizer.batch_decode(label_ids, skip_special_tokens=True)

    wer = 100 * metric.compute(predictions=pred_str, references=label_str)
    return {"wer": wer}
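# Note: compute_metrics receives generated token ids (not logits) because
# predict_with_generate=True is set in the training arguments below.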
from transformers import Seq2SeqTrainingArguments
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-small-mal",  # change to a repo name of your choice
    per_device_train_batch_size=16,
    gradient_accumulation_steps=1,  # increase by 2x for every 2x decrease in batch size
    learning_rate=1e-5,
    warmup_steps=500,
    max_steps=4000,
    gradient_checkpointing=True,
    fp16=True,
    fp16_full_eval=True,
    half_precision_backend="auto",
    evaluation_strategy="steps",
    per_device_eval_batch_size=8,
    predict_with_generate=True,
    generation_max_length=225,
    save_steps=1000,
    eval_steps=20,
    logging_steps=25,
    report_to=["tensorboard"],
    load_best_model_at_end=True,
    metric_for_best_model="wer",
    greater_is_better=False,
    push_to_hub=True,
)
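# Note: on newer transformers releases the argument evaluation_strategy has been
# renamed to eval_strategy; keep the spelling that matches the installed version.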
from transformers import Seq2SeqTrainer
trainer = Seq2SeqTrainer(
    args=training_args,
    model=model,
    train_dataset=dataset_dict["train"],
    eval_dataset=dataset_dict["test"],
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    tokenizer=processor.feature_extractor,  # saved with checkpoints; padding itself is handled by the custom collator
)
processor.save_pretrained(training_args.output_dir)
trainer.train()
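# With push_to_hub=True, checkpoints are uploaded during training; to publish the
# final (best-by-WER) model once training completes, a call like the one below can
# be used (assumes you are authenticated with the Hugging Face Hub):
# trainer.push_to_hub()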