#!/usr/bin/env python3
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from num2words import num2words as n2w
from transformers import (
    AutoFeatureExtractor,
    AutoModelForCTC,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    Wav2Vec2ProcessorWithLM,
    pipeline,
)

# from pyctcdecode import BeamSearchDecoderCTC


def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    lm = "withLM" if args.use_lm else "noLM"
    model_id = args.model_id.replace("/", "_").replace(".", "")
    dataset_id = "_".join([model_id] + args.dataset.split("/") + [args.config, args.split, lm])

    # load metrics
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"{dataset_id}\nWER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in a text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str, dataset: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""

    # IMPORTANT: this should correspond to the chars that were ignored during training
    chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\'\–\_\\\+\#\/]'  # noqa: W605

    text = re.sub(chars_to_ignore_regex, "", text.lower()) + " "

    if dataset.lower().endswith("nst"):
        text = text.lower()
        text = text.replace("(...vær stille under dette opptaket...)", "")
        text = re.sub('[áàâ]', 'a', text)
        text = re.sub('[ä]', 'æ', text)
        text = re.sub('[éèëê]', 'e', text)
        text = re.sub('[íìïî]', 'i', text)
        text = re.sub('[óòöô]', 'o', text)  # note: 'ö' is consumed here, so the 'ø' rule below never sees it
        text = re.sub('[ö]', 'ø', text)
        text = re.sub('[ç]', 'c', text)
        text = re.sub('[úùüû]', 'u', text)
        # text = re.sub('\\(?=(Punktum|Komma|Utropstegn|Spørsmålstegn))', ' ', text)
        text = re.sub('\s+', ' ', text)
    elif dataset.lower().endswith("npsc"):
        text = re.sub('[áàâ]', 'a', text)
        text = re.sub('[ä]', 'æ', text)
        text = re.sub('[éèëê]', 'e', text)
        text = re.sub('[íìïî]', 'i', text)
        text = re.sub('[óòöô]', 'o', text)
        text = re.sub('[ö]', 'ø', text)
        text = re.sub('[ç]', 'c', text)
        text = re.sub('[úùüû]', 'u', text)
        text = re.sub('\s+', ' ', text)
    elif dataset.lower().endswith("fleurs"):
        text = re.sub('[áàâ]', 'a', text)
        text = re.sub('[ä]', 'æ', text)
        text = re.sub('[éèëê]', 'e', text)
        text = re.sub('[íìïî]', 'i', text)
        text = re.sub('[óòöô]', 'o', text)
        text = re.sub('[ö]', 'ø', text)
        text = re.sub('[ç]', 'c', text)
        text = re.sub('[úùüû]', 'u', text)
        # spell out digits with num2words so the references match the model's letter-only vocabulary
        text = re.compile(r"-?[1-9][\d.]*").sub(lambda x: n2w(x.group(0), lang="no"), text)
        text = re.sub('\s+', ' ', text)

    # collapse hesitation/noise markers to single letters
    # (the exact source patterns are assumed here to be the NPSC-style tags <ee>, <mm>, <qq>, <inaudible>)
    text = re.sub("<ee>", "e", text)
    text = re.sub("<mm>", "m", text)
    text = re.sub("<qq>", "q", text)
    text = re.sub("<inaudible>", "i", text)

    # # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # # note that order is important here!
    # token_sequences_to_ignore = ["\n\n", "\n", " ", " "]

    # for t in token_sequences_to_ignore:
    #     text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first few examples
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1

    # asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)
    model_instance = AutoModelForCTC.from_pretrained(args.model_id)
    if args.use_lm:
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(args.model_id)
        decoder = processor.decoder
    else:
        processor = Wav2Vec2Processor.from_pretrained(args.model_id)
        decoder = None

    asr = pipeline(
        "automatic-speech-recognition",
        model=model_instance,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        decoder=decoder,
        device=args.device,
    )

    # feature_extractor_dict, _ = Wav2Vec2FeatureExtractor.get_feature_extractor_dict(args.model_id)
    # feature_extractor_dict["processor_class"] = "Wav2Vec2Processor" if not args.use_lm else "Wav2Vec2ProcessorWithLM"
    # feature_extractor = Wav2Vec2FeatureExtractor.from_dict(feature_extractor_dict)
    # asr = pipeline("automatic-speech-recognition", model=args.model_id, feature_extractor=feature_extractor, device=args.device, decoder=BeamSearchDecoderCTC.load_from_dir("./"))

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        # the pipeline always returns its transcription under the "text" key
        batch["prediction"] = prediction["text"]
        # normalize the reference transcription taken from the dataset's text column
        batch["target"] = normalize_text(batch[args.text_column], args.dataset)
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--text_column", type=str, default="text", help="Column name containing the transcription."
    )
    parser.add_argument(
        "--chunk_length_s",
        type=float,
        default=None,
        help="Chunk length in seconds. Defaults to None. For long audio files a good value is 5.0 seconds.",
    )
    parser.add_argument(
        "--stride_length_s",
        type=float,
        default=None,
        help="Stride of the audio chunks in seconds. Defaults to None. For long audio files a good value is 1.0 seconds.",
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log files for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU, 0 for the first GPU and so on. Defaults to GPU 0 if available, otherwise CPU.",
    )
    parser.add_argument(
        "--use_lm", action="store_true", help="If defined, use the included language model as the decoder."
    )
    args = parser.parse_args()

    main(args)
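
# Example invocation (a sketch only; the script filename, model id and dataset id below are
# illustrative placeholders, not values defined in this file):
#
#   python eval.py \
#       --model_id <org-or-user>/<wav2vec2-ctc-model> \
#       --dataset <org-or-user>/<dataset> \
#       --config no \
#       --split test \
#       --text_column text \
#       --chunk_length_s 5.0 \
#       --stride_length_s 1.0 \
#       --log_outputs
#
# Add --use_lm to decode with the model's bundled language model (requires a
# Wav2Vec2ProcessorWithLM in the model repository) and --device to pin a specific GPU/CPU.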