import os
from datasets import load_from_disk, DatasetDict, concatenate_datasets
# Get all batch directories
batch_dirs = [d for d in os.listdir("processed_dataset") if d.startswith("batch_")]
batch_dirs.sort(key=lambda x: int(x.split('_')[1])) # Sort numerically
# Load each batch and combine them
processed_batches = []
for batch_dir in batch_dirs:
    batch_path = os.path.join("processed_dataset", batch_dir)
    batch_dataset = load_from_disk(batch_path)
    processed_batches.append(batch_dataset)
# Combine all batches into one dataset
full_dataset = concatenate_datasets(processed_batches)
# Split into train and test
# First shuffle the dataset with a fixed seed for reproducibility
shuffled_dataset = full_dataset.shuffle(seed=42)
# Hold out 975 shuffled samples as the test split; the remainder becomes the training split
test_size = 975
processed_test = shuffled_dataset.select(range(test_size))
processed_train = shuffled_dataset.select(range(test_size, len(shuffled_dataset)))
# Create the dataset_dict with the new split
dataset_dict = DatasetDict({
    "train": processed_train,
    "test": processed_test
})
# Verify the loading and splitting was successful
print("\nDataset split information:")
print(f"Total examples: {len(shuffled_dataset)}")
print(f"Training examples: {len(dataset_dict['train'])}")
print(f"Test examples: {len(dataset_dict['test'])}")
# Optional: Print the first example from each split to verify the structure
print("\nFirst training example structure:")
print(dataset_dict['train'][0].keys())
print("\nFirst test example structure:")
print(dataset_dict['test'][0].keys())
from transformers import WhisperForConditionalGeneration
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
model.generation_config.language = "malayalam"
model.generation_config.task = "transcribe"
model.generation_config.forced_decoder_ids = None
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-small", language="Malayalam", task="transcribe")
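# For reference, a minimal sketch of how each example would likely have been prepared upstream
# before being saved into processed_dataset/batch_* (not called here; the batches on disk are
# already processed). The "audio" and "sentence" column names and the 16 kHz sampling rate are
# assumptions about the raw corpus, not taken from this script.
def prepare_example(example):
    audio = example["audio"]
    # compute log-Mel input features from the raw waveform
    example["input_features"] = processor.feature_extractor(
        audio["array"], sampling_rate=audio["sampling_rate"]
    ).input_features[0]
    # encode the transcription into label token ids
    example["labels"] = processor.tokenizer(example["sentence"]).input_ids
    return example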
import torch
from dataclasses import dataclass
from typing import Any, Dict, List, Union
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any
    decoder_start_token_id: int
    max_target_length: int = 448  # cap on label length (Whisper's decoder supports at most 448 tokens)
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have different lengths and need different padding methods
        # first treat the audio inputs by simply returning torch tensors
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
        # get the tokenized label sequences
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        # pad the labels to the longest sequence in the batch
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
        # truncate labels to max_target_length
        labels = labels_batch["input_ids"]
        if labels.shape[1] > self.max_target_length:
            labels = labels[:, :self.max_target_length]
        # replace padding with -100 so padded positions are ignored by the loss
        labels = labels.masked_fill(labels_batch.attention_mask[:, :labels.shape[1]].ne(1), -100)
        # if the bos token was prepended in the earlier tokenization step,
        # cut it here since the model prepends it again during training
        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
            labels = labels[:, 1:]
        batch["labels"] = labels
        return batch
# When initializing the data collator, you can now specify the max length
data_collator = DataCollatorSpeechSeq2SeqWithPadding(
    processor=processor,
    decoder_start_token_id=model.config.decoder_start_token_id,
    max_target_length=448,  # explicitly cap the label length
)
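# Optional sanity check: run two processed examples through the collator and confirm the padded
# shapes look right. The (80, 3000) log-Mel shape is what whisper-small's feature extractor
# produces for 30-second windows; adjust the expectation if the upstream preprocessing differs.
sample_batch = data_collator([dataset_dict["train"][i] for i in range(2)])
print(sample_batch["input_features"].shape)  # expected: torch.Size([2, 80, 3000])
print(sample_batch["labels"].shape)          # (2, longest label length in this mini-batch)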
import evaluate
metric = evaluate.load("wer")
def compute_metrics(pred):
    pred_ids = pred.predictions
    label_ids = pred.label_ids
    # replace -100 with the pad_token_id so the labels can be decoded
    label_ids[label_ids == -100] = processor.tokenizer.pad_token_id
    # we do not want to group tokens when computing the metrics
    pred_str = processor.tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
    label_str = processor.tokenizer.batch_decode(label_ids, skip_special_tokens=True)
    wer = 100 * metric.compute(predictions=pred_str, references=label_str)
    return {"wer": wer}
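# Quick sanity check of the metric itself on toy strings (not model output): with one word
# missing out of a three-word reference, WER should come out around 33.3 after the x100 scaling.
toy_wer = 100 * metric.compute(predictions=["hello world"], references=["hello there world"])
print(f"toy WER: {toy_wer:.1f}")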
from transformers import Seq2SeqTrainingArguments
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-small-mal",  # change to a repo name of your choice
    per_device_train_batch_size=16,
    gradient_accumulation_steps=1,  # increase by 2x for every 2x decrease in batch size
    learning_rate=1e-5,
    warmup_steps=500,
    max_steps=4000,
    gradient_checkpointing=True,
    fp16=True,
    fp16_full_eval=True,
    half_precision_backend="auto",
    evaluation_strategy="steps",
    per_device_eval_batch_size=8,
    predict_with_generate=True,
    generation_max_length=225,
    save_steps=1000,
    eval_steps=20,
    logging_steps=25,
    report_to=["tensorboard"],
    load_best_model_at_end=True,
    metric_for_best_model="wer",
    greater_is_better=False,
    push_to_hub=True,
)
from transformers import Seq2SeqTrainer
trainer = Seq2SeqTrainer(
    args=training_args,
    model=model,
    train_dataset=dataset_dict["train"],
    eval_dataset=dataset_dict["test"],
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    tokenizer=processor.feature_extractor,
)
processor.save_pretrained(training_args.output_dir)
trainer.train()
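# After training, a typical (sketched) follow-up: score the held-out split once more with the
# best checkpoint loaded, then push the final model to the Hub (push_to_hub=True above already
# uploads checkpoints to the repo implied by output_dir during training).
final_metrics = trainer.evaluate(dataset_dict["test"])
print(final_metrics)
trainer.push_to_hub()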