yuchen005 committed on
Commit
fe09823
1 Parent(s): 1cbfe4d

Upload 34 files

decoding.py ADDED
@@ -0,0 +1,1006 @@
1
+ from dataclasses import dataclass, field, replace
2
+ from typing import Dict, List, Tuple, Iterable, Optional, Sequence, Union, TYPE_CHECKING
3
+
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from torch import Tensor
8
+ from torch.distributions import Categorical
9
+
10
+ from .audio import CHUNK_LENGTH
11
+ from .tokenizer import Tokenizer, get_tokenizer
12
+ from .utils import compression_ratio
13
+
14
+ if TYPE_CHECKING:
15
+ from .model import Whisper
16
+
17
+
18
+ @torch.no_grad()
19
+ def detect_language(model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None) -> Tuple[Tensor, List[dict]]:
20
+ """
21
+ Detect the spoken language in the audio and return it as a list of strings, along with the ids
22
+ of the most probable language tokens and the probability distribution over all language tokens.
23
+ This is performed outside the main decode loop in order to not interfere with kv-caching.
24
+
25
+ Returns
26
+ -------
27
+ language_tokens : Tensor, shape = (n_audio,)
28
+ ids of the most probable language tokens, which appear after the startoftranscript token.
29
+ language_probs : List[Dict[str, float]], length = n_audio
30
+ list of dictionaries containing the probability distribution over all languages.
31
+ """
32
+ if tokenizer is None:
33
+ tokenizer = get_tokenizer(model.is_multilingual)
34
+ if tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence:
35
+ raise ValueError(f"This model doesn't have language tokens so it can't perform lang id")
36
+
37
+ single = mel.ndim == 2
38
+ if single:
39
+ mel = mel.unsqueeze(0)
40
+
41
+ # skip encoder forward pass if already-encoded audio features were given
42
+ if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state):
43
+ mel = model.encoder(mel)
44
+
45
+ # forward pass using a single token, startoftranscript
46
+ n_audio = mel.shape[0]
47
+ x = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device) # [n_audio, 1]
48
+ logits = model.logits(x, mel)[:, 0]
49
+
50
+ # collect detected languages; suppress all non-language tokens
51
+ mask = torch.ones(logits.shape[-1], dtype=torch.bool)
52
+ mask[list(tokenizer.all_language_tokens)] = False
53
+ logits[:, mask] = -np.inf
54
+ language_tokens = logits.argmax(dim=-1)
55
+ language_token_probs = logits.softmax(dim=-1).cpu()
56
+ language_probs = [
57
+ {
58
+ c: language_token_probs[i, j].item()
59
+ for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes)
60
+ }
61
+ for i in range(n_audio)
62
+ ]
63
+
64
+ if single:
65
+ language_tokens = language_tokens[0]
66
+ language_probs = language_probs[0]
67
+
68
+ return language_tokens, language_probs
69
+
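A minimal usage sketch for the function above, assuming a multilingual checkpoint and the standard `whisper` audio helpers; the model size and file name are placeholders:

```python
import whisper

model = whisper.load_model("large-v2")
audio = whisper.pad_or_trim(whisper.load_audio("sample.wav"))  # placeholder path
mel = whisper.log_mel_spectrogram(audio).to(model.device)

# accepts a single mel of shape (80, 3000) or a batch of shape (n_audio, 80, 3000)
language_tokens, language_probs = model.detect_language(mel)
print(max(language_probs, key=language_probs.get))  # e.g. "en"
```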
70
+
71
+ @dataclass(frozen=True)
72
+ class DecodingOptions:
73
+ task: str = "transcribe" # whether to perform X->X "transcribe" or X->English "translate"
74
+ language: Optional[str] = None # language that the audio is in; uses detected language if None
75
+
76
+ # sampling-related options
77
+ temperature: float = 0.0
78
+ sample_len: Optional[int] = None # maximum number of tokens to sample
79
+ best_of: Optional[int] = None # number of independent samples to collect, when t > 0
80
+ beam_size: Optional[int] = None # number of beams in beam search, when t == 0
81
+ patience: Optional[float] = None # patience in beam search (https://arxiv.org/abs/2204.05424)
82
+
83
+ # options for ranking generations (either beams or best-of-N samples)
84
+ length_penalty: Optional[float] = None # "alpha" in Google NMT, None defaults to length norm
85
+
86
+ # prompt, prefix, and token suppression
87
+ prompt: Optional[Union[str, List[int]]] = None # text or tokens for the previous context
88
+ prefix: Optional[Union[str, List[int]]] = None # text or tokens to prefix the current context
89
+ suppress_blank: bool = True # this will suppress blank outputs
90
+
91
+ # list of tokens ids (or comma-separated token ids) to suppress
92
+ # "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()`
93
+ suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1"
94
+
95
+ # timestamp sampling options
96
+ without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only
97
+ max_initial_timestamp: Optional[float] = 1.0 # the initial timestamp cannot be later than this
98
+
99
+ # implementation details
100
+ fp16: bool = True # use fp16 for most of the calculation
101
+
102
+
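A couple of illustrative `DecodingOptions` configurations (the values are examples, not recommendations):

```python
# Greedy decoding of English audio, text tokens only
greedy_opts = DecodingOptions(language="en", temperature=0.0, without_timestamps=True)

# Beam search with 5 beams; patience > 1.0 keeps collecting finished beams for longer
beam_opts = DecodingOptions(language="en", beam_size=5, patience=2.0)

# Invalid: beam_size and best_of are mutually exclusive (DecodingTask._verify_options raises)
# DecodingOptions(beam_size=5, best_of=5)
```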
103
+ @dataclass(frozen=True)
104
+ class DecodingResult:
105
+ audio_features: Tensor
106
+ language: str
107
+ language_probs: Optional[Dict[str, float]] = None
108
+ tokens: List[int] = field(default_factory=list)
109
+ text: str = ""
110
+ avg_logprob: float = np.nan
111
+ no_speech_prob: float = np.nan
112
+ temperature: float = np.nan
113
+ compression_ratio: float = np.nan
114
+
115
+
116
+ class Inference:
117
+ def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
118
+ """Perform a forward pass on the decoder and return per-token logits"""
119
+ raise NotImplementedError
120
+
121
+ def rearrange_kv_cache(self, source_indices) -> None:
122
+ """Update the key-value cache according to the updated beams"""
123
+ raise NotImplementedError
124
+
125
+ def cleanup_caching(self) -> None:
126
+ """Clean up any resources or hooks after decoding is finished"""
127
+ pass
128
+
129
+
130
+ class PyTorchInference(Inference):
131
+ def __init__(self, model: "Whisper", initial_token_length: int):
132
+ self.model: "Whisper" = model
133
+ self.initial_token_length = initial_token_length
134
+ self.kv_cache = {}
135
+ self.hooks = []
136
+
137
+ def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor:
138
+ if not self.kv_cache:
139
+ self.kv_cache, self.hooks = self.model.install_kv_cache_hooks()
140
+
141
+ if tokens.shape[-1] > self.initial_token_length:
142
+ # only need to use the last token except in the first forward pass
143
+ tokens = tokens[:, -1:]
144
+
145
+ return self.model.decoder(tokens, audio_features, kv_cache=self.kv_cache)
146
+
147
+ def cleanup_caching(self):
148
+ for hook in self.hooks:
149
+ hook.remove()
150
+
151
+ self.kv_cache = {}
152
+ self.hooks = []
153
+
154
+ def rearrange_kv_cache(self, source_indices):
155
+ for module, tensor in self.kv_cache.items():
156
+ # update the key/value cache to contain the selected sequences
157
+ self.kv_cache[module] = tensor[source_indices].detach()
158
+
159
+
160
+ class SequenceRanker:
161
+ def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]) -> List[int]:
162
+ """
163
+ Given a list of groups of samples and their cumulative log probabilities,
164
+ return the indices of the samples in each group to select as the final result
165
+ """
166
+ raise NotImplementedError
167
+
168
+
169
+ class MaximumLikelihoodRanker(SequenceRanker):
170
+ """
171
+ Select the sample with the highest log probabilities, penalized using either
172
+ a simple length normalization or Google NMT paper's length penalty
173
+ """
174
+
175
+ def __init__(self, length_penalty: Optional[float]):
176
+ self.length_penalty = length_penalty
177
+
178
+ def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]):
179
+ def scores(logprobs, lengths):
180
+ result = []
181
+ for logprob, length in zip(logprobs, lengths):
182
+ if self.length_penalty is None:
183
+ penalty = length
184
+ else:
185
+ # from the Google NMT paper
186
+ penalty = ((5 + length) / 6) ** self.length_penalty
187
+ result.append(logprob / penalty)
188
+ return result
189
+
190
+ # get the sequence with the highest score
191
+ lengths = [[len(t) for t in s] for s in tokens]
192
+ return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)]
193
+
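To make the two normalizations above concrete (the numbers are made up): with a cumulative log probability of -12.0 over 20 tokens, plain length normalization scores -0.6, while the Google NMT penalty with alpha = 0.6 scores about -5.1:

```python
# Illustrative comparison of the two normalizations used by MaximumLikelihoodRanker
sum_logprob, length, alpha = -12.0, 20, 0.6
plain_score = sum_logprob / length                        # -0.6   (length_penalty is None)
nmt_score = sum_logprob / (((5 + length) / 6) ** alpha)   # ~ -5.10 (length_penalty = 0.6)
```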
194
+
195
+ class TokenDecoder:
196
+ def reset(self):
197
+ """Initialize any stateful variables for decoding a new sequence"""
198
+
199
+ def update(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor) -> Tuple[Tensor, bool]:
200
+ """Specify how to select the next token, based on the current trace and logits
201
+
202
+ Parameters
203
+ ----------
204
+ tokens : Tensor, shape = (n_batch, current_sequence_length)
205
+ all tokens in the context so far, including the prefix and sot_sequence tokens
206
+
207
+ logits : Tensor, shape = (n_batch, vocab_size)
208
+ per-token logits of the probability distribution at the current step
209
+
210
+ sum_logprobs : Tensor, shape = (n_batch)
211
+ cumulative log probabilities for each sequence
212
+
213
+ Returns
214
+ -------
215
+ tokens : Tensor, shape = (n_batch, current_sequence_length + 1)
216
+ the tokens, appended with the selected next token
217
+
218
+ completed : bool
219
+ True if all sequences have reached the end of text
220
+
221
+ """
222
+ raise NotImplementedError
223
+
224
+ def finalize(
225
+ self, tokens: Tensor, sum_logprobs: Tensor
226
+ ) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]:
227
+ """Finalize search and return the final candidate sequences
228
+
229
+ Parameters
230
+ ----------
231
+ tokens : Tensor, shape = (n_audio, n_group, current_sequence_length)
232
+ all tokens in the context so far, including the prefix and sot_sequence
233
+
234
+ sum_logprobs : Tensor, shape = (n_audio, n_group)
235
+ cumulative log probabilities for each sequence
236
+
237
+ Returns
238
+ -------
239
+ tokens : Sequence[Sequence[Tensor]], length = n_audio
240
+ sequence of Tensors containing candidate token sequences, for each audio input
241
+
242
+ sum_logprobs : List[List[float]], length = n_audio
243
+ sequence of cumulative log probabilities corresponding to the above
244
+
245
+ """
246
+ raise NotImplementedError
247
+
248
+
249
+ class GreedyDecoder(TokenDecoder):
250
+ def __init__(self, temperature: float, eot: int):
251
+ self.temperature = temperature
252
+ self.eot = eot
253
+
254
+ def update(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor) -> Tuple[Tensor, bool]:
255
+ temperature = self.temperature
256
+ if temperature == 0:
257
+ next_tokens = logits.argmax(dim=-1)
258
+ else:
259
+ next_tokens = Categorical(logits=logits / temperature).sample()
260
+
261
+ logprobs = F.log_softmax(logits.float(), dim=-1)
262
+ current_logprobs = logprobs[torch.arange(logprobs.shape[0]), next_tokens]
263
+ sum_logprobs += current_logprobs * (tokens[:, -1] != self.eot)
264
+
265
+ next_tokens[tokens[:, -1] == self.eot] = self.eot
266
+ tokens = torch.cat([tokens, next_tokens[:, None]], dim=-1)
267
+
268
+ completed = (tokens[:, -1] == self.eot).all()
269
+ return tokens, completed
270
+
271
+ def finalize(self, tokens: Tensor, sum_logprobs: Tensor):
272
+ # make sure each sequence has at least one EOT token at the end
273
+ tokens = F.pad(tokens, (0, 1), value=self.eot)
274
+ return tokens, sum_logprobs.tolist()
275
+
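A small self-contained sketch of the selection rule in `GreedyDecoder.update`: argmax at temperature 0, otherwise a sample from the temperature-scaled distribution (the logits below are random stand-ins):

```python
import torch
from torch.distributions import Categorical

logits = torch.randn(2, 8)                                # (n_batch, vocab_size) stand-in
greedy_next = logits.argmax(dim=-1)                       # temperature == 0
sampled_next = Categorical(logits=logits / 0.8).sample()  # temperature == 0.8
```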
276
+
277
+ class BeamSearchDecoder(TokenDecoder):
278
+ def __init__(self, beam_size: int, eot: int, inference: Inference, patience: Optional[float] = None):
279
+ self.beam_size = beam_size
280
+ self.eot = eot
281
+ self.inference = inference
282
+ self.patience = patience or 1.0
283
+ self.max_candidates: int = round(beam_size * self.patience)
284
+ self.finished_sequences = None
285
+
286
+ assert self.max_candidates > 0, f"Invalid beam size ({beam_size}) or patience ({patience})"
287
+
288
+ def reset(self):
289
+ self.finished_sequences = None
290
+
291
+ def update(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor) -> Tuple[Tensor, bool]:
292
+ if tokens.shape[0] % self.beam_size != 0:
293
+ raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")
294
+
295
+ n_audio = tokens.shape[0] // self.beam_size
296
+ if self.finished_sequences is None: # for the first update
297
+ self.finished_sequences = [{} for _ in range(n_audio)]
298
+
299
+ logprobs = F.log_softmax(logits.float(), dim=-1)
300
+ next_tokens, source_indices, finished_sequences = [], [], []
301
+ for i in range(n_audio):
302
+ scores, sources, finished = {}, {}, {}
303
+
304
+ # STEP 1: calculate the cumulative log probabilities for possible candidates
305
+ for j in range(self.beam_size):
306
+ idx = i * self.beam_size + j
307
+ prefix = tokens[idx].tolist()
308
+ for logprob, token in zip(*logprobs[idx].topk(self.beam_size + 1)):
309
+ new_logprob = (sum_logprobs[idx] + logprob).item()
310
+ sequence = tuple(prefix + [token.item()])
311
+ scores[sequence] = new_logprob
312
+ sources[sequence] = idx
313
+
314
+ # STEP 2: rank the candidates and keep the top beam_size sequences for each audio
315
+ saved = 0
316
+ for sequence in sorted(scores, key=scores.get, reverse=True):
317
+ if sequence[-1] == self.eot:
318
+ finished[sequence] = scores[sequence]
319
+ else:
320
+ sum_logprobs[len(next_tokens)] = scores[sequence]
321
+ next_tokens.append(sequence)
322
+ source_indices.append(sources[sequence])
323
+
324
+ saved += 1
325
+ if saved == self.beam_size:
326
+ break
327
+
328
+ finished_sequences.append(finished)
329
+
330
+ tokens = torch.tensor(next_tokens, device=tokens.device)
331
+ self.inference.rearrange_kv_cache(source_indices)
332
+
333
+ # add newly finished sequences to self.finished_sequences
334
+ assert len(self.finished_sequences) == len(finished_sequences)
335
+ for previously_finished, newly_finished in zip(self.finished_sequences, finished_sequences):
336
+ for seq in sorted(newly_finished, key=newly_finished.get, reverse=True):
337
+ if len(previously_finished) >= self.max_candidates:
338
+ break # the candidate list is full
339
+ previously_finished[seq] = newly_finished[seq]
340
+
341
+ # mark as completed if all audio has enough number of samples
342
+ completed = all(
343
+ len(sequences) >= self.max_candidates for sequences in self.finished_sequences
344
+ )
345
+ return tokens, completed
346
+
347
+ def finalize(self, preceding_tokens: Tensor, sum_logprobs: Tensor):
348
+ # collect all finished sequences, including patience, and add unfinished ones if not enough
349
+ sum_logprobs = sum_logprobs.cpu()
350
+ for i, sequences in enumerate(self.finished_sequences):
351
+ if len(sequences) < self.beam_size: # when not enough sequences are finished
352
+ for j in list(np.argsort(sum_logprobs[i]))[::-1]:
353
+ sequence = preceding_tokens[i, j].tolist() + [self.eot]
354
+ sequences[tuple(sequence)] = sum_logprobs[i][j].item()
355
+ if len(sequences) >= self.beam_size:
356
+ break
357
+
358
+ # print(f'self.finished_sequences = {self.finished_sequences}')
359
+
360
+ tokens: List[List[Tensor]] = [
361
+ [torch.tensor(seq) for seq in sequences.keys()] for sequences in self.finished_sequences
362
+ ]
363
+ sum_logprobs: List[List[float]] = [
364
+ list(sequences.values()) for sequences in self.finished_sequences
365
+ ]
366
+ return tokens, sum_logprobs
367
+
368
+
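A toy sketch of one expansion step mirroring STEP 1 / STEP 2 above: each of the `beam_size` live beams proposes `beam_size + 1` continuations, and the `beam_size` highest-scoring non-EOT sequences survive (all numbers here are stand-ins):

```python
import torch
import torch.nn.functional as F

beam_size, vocab_size = 3, 10
logprobs = F.log_softmax(torch.randn(beam_size, vocab_size), dim=-1)
sum_logprobs = torch.zeros(beam_size)

candidates = []
for j in range(beam_size):
    top_lp, top_tok = logprobs[j].topk(beam_size + 1)
    for lp, tok in zip(top_lp, top_tok):
        candidates.append((float(sum_logprobs[j] + lp), j, int(tok)))

# keep the beam_size best (score, source beam, next token) triples
survivors = sorted(candidates, reverse=True)[:beam_size]
```

The patience option only changes when decoding stops: with `beam_size=5` and `patience=2.0`, `max_candidates = round(5 * 2.0) = 10` finished sequences are collected per audio before the search is marked complete.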
369
+ class LogitFilter:
370
+ def apply(self, logits: Tensor, tokens: Tensor) -> None:
371
+ """Apply any filtering or masking to logits in-place
372
+
373
+ Parameters
374
+ ----------
375
+ logits : Tensor, shape = (n_batch, vocab_size)
376
+ per-token logits of the probability distribution at the current step
377
+
378
+ tokens : Tensor, shape = (n_batch, current_sequence_length)
379
+ all tokens in the context so far, including the prefix and sot_sequence tokens
380
+
381
+ """
382
+ raise NotImplementedError
383
+
384
+
385
+ class SuppressBlank(LogitFilter):
386
+ def __init__(self, tokenizer: Tokenizer, sample_begin: int):
387
+ self.tokenizer = tokenizer
388
+ self.sample_begin = sample_begin
389
+
390
+ def apply(self, logits: Tensor, tokens: Tensor):
391
+ if tokens.shape[1] == self.sample_begin:
392
+ logits[:, self.tokenizer.encode(" ") + [self.tokenizer.eot]] = -np.inf
393
+
394
+
395
+ class SuppressTokens(LogitFilter):
396
+ def __init__(self, suppress_tokens: Sequence[int]):
397
+ self.suppress_tokens = list(suppress_tokens)
398
+
399
+ def apply(self, logits: Tensor, tokens: Tensor):
400
+ logits[:, self.suppress_tokens] = -np.inf
401
+
402
+
403
+ class ApplyTimestampRules(LogitFilter):
404
+ def __init__(
405
+ self, tokenizer: Tokenizer, sample_begin: int, max_initial_timestamp_index: Optional[int]
406
+ ):
407
+ self.tokenizer = tokenizer
408
+ self.sample_begin = sample_begin
409
+ self.max_initial_timestamp_index = max_initial_timestamp_index
410
+
411
+ def apply(self, logits: Tensor, tokens: Tensor):
412
+ # suppress <|notimestamps|> which is handled by without_timestamps
413
+ if self.tokenizer.no_timestamps is not None:
414
+ logits[:, self.tokenizer.no_timestamps] = -np.inf
415
+
416
+ # timestamps have to appear in pairs, except directly before EOT; mask logits accordingly
417
+ for k in range(tokens.shape[0]):
418
+ seq = [t for t in tokens[k, self.sample_begin :].tolist()]
419
+ last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin
420
+ penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin
421
+
422
+ if last_was_timestamp:
423
+ if penultimate_was_timestamp: # has to be non-timestamp
424
+ logits[k, self.tokenizer.timestamp_begin :] = -np.inf
425
+ else: # cannot be normal text tokens
426
+ logits[k, : self.tokenizer.eot] = -np.inf
427
+
428
+ # apply the `max_initial_timestamp` option
429
+ if tokens.shape[1] == self.sample_begin and self.max_initial_timestamp_index is not None:
430
+ last_allowed = self.tokenizer.timestamp_begin + self.max_initial_timestamp_index
431
+ logits[:, last_allowed + 1 :] = -np.inf
432
+
433
+ # if sum of probability over timestamps is above any other token, sample timestamp
434
+ logprobs = F.log_softmax(logits.float(), dim=-1)
435
+ for k in range(tokens.shape[0]):
436
+ timestamp_logprob = logprobs[k, self.tokenizer.timestamp_begin :].logsumexp(dim=-1)
437
+ max_text_token_logprob = logprobs[k, : self.tokenizer.timestamp_begin].max()
438
+ if timestamp_logprob > max_text_token_logprob:
439
+ logits[k, : self.tokenizer.timestamp_begin] = -np.inf
440
+
441
+
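For context on `max_initial_timestamp_index` above: with the usual 30-second chunk and 1500 audio context positions, each timestamp token covers 0.02 s, so `max_initial_timestamp = 1.0` allows the first timestamp to be at most 50 steps past `timestamp_begin` (a sketch using the typical model dimensions):

```python
CHUNK_LENGTH, n_audio_ctx = 30, 1500       # typical Whisper values
precision = CHUNK_LENGTH / n_audio_ctx     # 0.02 seconds per timestamp token
max_initial_timestamp = 1.0
max_initial_timestamp_index = round(max_initial_timestamp / precision)  # 50
```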
442
+ class DecodingTask:
443
+ inference: Inference
444
+ sequence_ranker: SequenceRanker
445
+ decoder: TokenDecoder
446
+ logit_filters: List[LogitFilter]
447
+
448
+ def __init__(self, model: "Whisper", options: DecodingOptions):
449
+ self.model = model
450
+
451
+ language = options.language or "en"
452
+ tokenizer = get_tokenizer(model.is_multilingual, language=language, task=options.task)
453
+ self.tokenizer: Tokenizer = tokenizer
454
+ self.options: DecodingOptions = self._verify_options(options)
455
+
456
+ self.n_group: int = options.beam_size or options.best_of or 1
457
+ self.n_ctx: int = model.dims.n_text_ctx
458
+ self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2
459
+
460
+ self.sot_sequence: Tuple[int] = tokenizer.sot_sequence
461
+ if self.options.without_timestamps:
462
+ self.sot_sequence = tokenizer.sot_sequence_including_notimestamps
463
+
464
+ self.initial_tokens: Tuple[int] = self._get_initial_tokens()
465
+ self.sample_begin: int = len(self.initial_tokens)
466
+ self.sot_index: int = self.initial_tokens.index(tokenizer.sot)
467
+
468
+ # inference: implements the forward pass through the decoder, including kv caching
469
+ self.inference = PyTorchInference(model, len(self.initial_tokens))
470
+
471
+ # sequence ranker: implements how to rank a group of sampled sequences
472
+ self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty)
473
+
474
+ # decoder: implements how to select the next tokens, given the autoregressive distribution
475
+ if options.beam_size is not None:
476
+ self.decoder = BeamSearchDecoder(
477
+ options.beam_size, tokenizer.eot, self.inference, options.patience
478
+ )
479
+ else:
480
+ self.decoder = GreedyDecoder(options.temperature, tokenizer.eot)
481
+
482
+ # logit filters: applies various rules to suppress or penalize certain tokens
483
+ self.logit_filters = []
484
+ if self.options.suppress_blank:
485
+ self.logit_filters.append(SuppressBlank(self.tokenizer, self.sample_begin))
486
+ if self.options.suppress_tokens:
487
+ self.logit_filters.append(SuppressTokens(self._get_suppress_tokens()))
488
+ if not options.without_timestamps:
489
+ precision = CHUNK_LENGTH / model.dims.n_audio_ctx # usually 0.02 seconds
490
+ max_initial_timestamp_index = None
491
+ if options.max_initial_timestamp:
492
+ max_initial_timestamp_index = round(self.options.max_initial_timestamp / precision)
493
+ self.logit_filters.append(
494
+ ApplyTimestampRules(tokenizer, self.sample_begin, max_initial_timestamp_index)
495
+ )
496
+
497
+ def _verify_options(self, options: DecodingOptions) -> DecodingOptions:
498
+ if options.beam_size is not None and options.best_of is not None:
499
+ raise ValueError("beam_size and best_of can't be given together")
500
+ if options.temperature == 0:
501
+ if options.best_of is not None:
502
+ raise ValueError("best_of with greedy sampling (T=0) is not compatible")
503
+ if options.patience is not None and options.beam_size is None:
504
+ raise ValueError("patience requires beam_size to be given")
505
+ if options.length_penalty is not None and not (0 <= options.length_penalty <= 1):
506
+ raise ValueError("length_penalty (alpha) should be a value between 0 and 1")
507
+
508
+ return options
509
+
510
+ def _get_initial_tokens(self) -> Tuple[int]:
511
+ tokens = list(self.sot_sequence)
512
+ prefix = self.options.prefix
513
+ prompt = self.options.prompt
514
+
515
+ if prefix:
516
+ prefix_tokens = (
517
+ self.tokenizer.encode(" " + prefix.strip()) if isinstance(prefix, str) else prefix
518
+ )
519
+ if self.sample_len is not None:
520
+ max_prefix_len = self.n_ctx // 2 - self.sample_len
521
+ prefix_tokens = prefix_tokens[-max_prefix_len:]
522
+ tokens = tokens + prefix_tokens
523
+
524
+ if prompt:
525
+ prompt_tokens = (
526
+ self.tokenizer.encode(" " + prompt.strip()) if isinstance(prompt, str) else prompt
527
+ )
528
+ tokens = [self.tokenizer.sot_prev] + prompt_tokens[-(self.n_ctx // 2 - 1) :] + tokens
529
+
530
+ return tuple(tokens)
531
+
532
+ def _get_suppress_tokens(self) -> Tuple[int]:
533
+ suppress_tokens = self.options.suppress_tokens
534
+
535
+ if isinstance(suppress_tokens, str):
536
+ suppress_tokens = [int(t) for t in suppress_tokens.split(",")]
537
+
538
+ if -1 in suppress_tokens:
539
+ suppress_tokens = [t for t in suppress_tokens if t >= 0]
540
+ suppress_tokens.extend(self.tokenizer.non_speech_tokens)
541
+ elif suppress_tokens is None or len(suppress_tokens) == 0:
542
+ suppress_tokens = [] # interpret empty string as an empty list
543
+ else:
544
+ assert isinstance(suppress_tokens, list), "suppress_tokens must be a list"
545
+
546
+ suppress_tokens.extend(
547
+ [self.tokenizer.sot, self.tokenizer.sot_prev, self.tokenizer.sot_lm]
548
+ )
549
+ if self.tokenizer.no_speech is not None:
550
+ # no-speech probability is collected separately
551
+ suppress_tokens.append(self.tokenizer.no_speech)
552
+
553
+ return tuple(sorted(set(suppress_tokens)))
554
+
555
+ def _get_audio_features(self, mel: Tensor):
556
+ if self.options.fp16:
557
+ mel = mel.half()
558
+
559
+ if mel.shape[-2:] == (self.model.dims.n_audio_ctx, self.model.dims.n_audio_state):
560
+ # encoded audio features are given; skip audio encoding
561
+ audio_features = mel
562
+ else:
563
+ audio_features = self.model.encoder(mel)
564
+
565
+ if audio_features.dtype != (torch.float16 if self.options.fp16 else torch.float32):
566
+ raise TypeError(f"audio_features has an incorrect dtype: {audio_features.dtype}")
567
+
568
+ return audio_features
569
+
570
+ def _detect_language(self, audio_features: Tensor, tokens: Tensor):
571
+ languages = [self.options.language] * audio_features.shape[0]
572
+ lang_probs = None
573
+
574
+ if self.options.language is None or self.options.task == "lang_id":
575
+ lang_tokens, lang_probs = self.model.detect_language(audio_features, self.tokenizer)
576
+ languages = [max(probs, key=probs.get) for probs in lang_probs]
577
+ if self.options.language is None:
578
+ tokens[:, self.sot_index + 1] = lang_tokens # write language tokens
579
+
580
+ return languages, lang_probs
581
+
582
+ def _main_loop(self, audio_features: Tensor, tokens: Tensor):
583
+ assert audio_features.shape[0] == tokens.shape[0]
584
+ n_batch = tokens.shape[0]
585
+ sum_logprobs: Tensor = torch.zeros(n_batch, device=audio_features.device)
586
+ no_speech_probs = [np.nan] * n_batch
587
+
588
+ try:
589
+ for i in range(self.sample_len):
590
+ logits = self.inference.logits(tokens, audio_features)
591
+
592
+ if i == 0 and self.tokenizer.no_speech is not None: # save no_speech_probs
593
+ probs_at_sot = logits[:, self.sot_index].float().softmax(dim=-1)
594
+ no_speech_probs = probs_at_sot[:, self.tokenizer.no_speech].tolist()
595
+
596
+ # now we need to consider the logits at the last token only
597
+ logits = logits[:, -1]
598
+
599
+ # apply the logit filters, e.g. for suppressing or applying penalty to
600
+ for logit_filter in self.logit_filters:
601
+ logit_filter.apply(logits, tokens)
602
+
603
+ # expand the tokens tensor with the selected next tokens
604
+ tokens, completed = self.decoder.update(tokens, logits, sum_logprobs)
605
+
606
+ if completed or tokens.shape[-1] > self.n_ctx:
607
+ break
608
+ finally:
609
+ self.inference.cleanup_caching()
610
+
611
+ return tokens, sum_logprobs, no_speech_probs
612
+
613
+ @torch.no_grad()
614
+ def run(self, mel: Tensor) -> List[DecodingResult]:
615
+ self.decoder.reset()
616
+ tokenizer: Tokenizer = self.tokenizer
617
+ n_audio: int = mel.shape[0]
618
+
619
+ audio_features: Tensor = self._get_audio_features(mel) # encoder forward pass
620
+ tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1)
621
+
622
+ # detect language if requested, overwriting the language token
623
+ languages, language_probs = self._detect_language(audio_features, tokens)
624
+ if self.options.task == "lang_id":
625
+ return [
626
+ DecodingResult(
627
+ audio_features=features, language=language, language_probs=probs
628
+ )
629
+ for features, language, probs in zip(
630
+ audio_features, languages, language_probs
631
+ )
632
+ ]
633
+
634
+ # repeat text tensors by the group size, for beam search or best-of-n sampling
635
+ audio_features = audio_features.repeat_interleave(self.n_group, dim=0)
636
+ tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)
637
+
638
+ # call the main sampling loop
639
+ # print(audio_features.shape, tokens.shape) # torch.Size([1, 1500, 1280]) torch.Size([50, 3])
640
+ tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)
641
+
642
+ # reshape the tensors to have (n_audio, n_group) as the first two dimensions
643
+ audio_features = audio_features[:: self.n_group]
644
+ no_speech_probs = no_speech_probs[:: self.n_group]
645
+ assert audio_features.shape[0] == len(no_speech_probs) == n_audio
646
+
647
+ tokens = tokens.reshape(n_audio, self.n_group, -1)
648
+ sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group)
649
+
650
+ # get the final candidates for each group, and slice between the first sampled token and EOT
651
+ tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
652
+ tokens: List[List[Tensor]] = [
653
+ [t[self.sample_begin: (t == tokenizer.eot).nonzero()[0, 0]] for t in s]
654
+ for s in tokens
655
+ ]
656
+
657
+ # select the top-ranked sample in each group
658
+ selected = self.sequence_ranker.rank(tokens, sum_logprobs)
659
+ tokens: List[List[int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
660
+ texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]
661
+
662
+ sum_logprobs: List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
663
+ avg_logprobs: List[float] = [
664
+ lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)
665
+ ]
666
+
667
+ fields = (
668
+ texts,
669
+ languages,
670
+ tokens,
671
+ audio_features,
672
+ avg_logprobs,
673
+ no_speech_probs,
674
+ )
675
+ if len(set(map(len, fields))) != 1:
676
+ raise RuntimeError(f"inconsistent result lengths: {list(map(len, fields))}")
677
+
678
+ return [
679
+ DecodingResult(
680
+ audio_features=features,
681
+ language=language,
682
+ tokens=tokens,
683
+ text=text,
684
+ avg_logprob=avg_logprob,
685
+ no_speech_prob=no_speech_prob,
686
+ temperature=self.options.temperature,
687
+ compression_ratio=compression_ratio(text),
688
+ )
689
+ for text, language, tokens, features, avg_logprob, no_speech_prob in zip(
690
+ *fields
691
+ )
692
+ ]
693
+
694
+ @torch.no_grad()
695
+ def run1(self, mel: Tensor) -> List[List[str]]:
696
+ self.decoder.reset()
697
+ tokenizer: Tokenizer = self.tokenizer
698
+ n_audio: int = mel.shape[0]
699
+
700
+ audio_features: Tensor = self._get_audio_features(mel) # encoder forward pass
701
+ tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1)
702
+
703
+ # detect language if requested, overwriting the language token
704
+ languages, language_probs = self._detect_language(audio_features, tokens)
705
+ if self.options.task == "lang_id":
706
+ return [
707
+ DecodingResult(audio_features=features, language=language, language_probs=probs)
708
+ for features, language, probs in zip(audio_features, languages, language_probs)
709
+ ]
710
+
711
+ # repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
712
+ audio_features = audio_features.repeat_interleave(self.n_group, dim=0)
713
+ tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)
714
+
715
+ # call the main sampling loop
716
+ tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)
717
+
718
+ # reshape the tensors to have (n_audio, n_group) as the first two dimensions
719
+ audio_features = audio_features[:: self.n_group]
720
+ no_speech_probs = no_speech_probs[:: self.n_group]
721
+ assert audio_features.shape[0] == len(no_speech_probs) == n_audio
722
+
723
+ tokens = tokens.reshape(n_audio, self.n_group, -1)
724
+ sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group)
725
+
726
+ # get the final candidates for each group, and slice between the first sampled token and EOT
727
+ tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
728
+ tokens: List[List[Tensor]] = [
729
+ [t[self.sample_begin : (t == tokenizer.eot).nonzero()[0, 0]] for t in s] for s in tokens
730
+ ]
731
+
732
+ # select the top-ranked sample in each group
733
+ # selected = self.sequence_ranker.rank(tokens, sum_logprobs)
734
+ # print(f'selected = {selected}\n')
735
+
736
+ tokens = [[i.tolist() for i in t] for t in tokens]
737
+ avg_logprobs: List[float] = [[ilp / (len(it) + 1) for it, ilp in zip(t, lp)] for t, lp in zip(tokens, sum_logprobs)]
738
+ texts = [[tokenizer.decode(i).strip() for i in t] for t in tokens]
739
+
740
+ # print(f'tokens = {tokens}')
741
+ # print(f'avg_logprobs = {avg_logprobs}')
742
+ # print(f'texts = {texts}')
743
+
744
+ import heapq
745
+ top50_list = [heapq.nlargest(50, range(len(a)), a.__getitem__) for a in avg_logprobs]
746
+ # print(f'top50_list = {top50_list}')
747
+
748
+ texts = [[text[i] for i in top50] for text, top50 in zip(texts, top50_list)]
749
+
750
+ return texts
751
+
752
+ @torch.no_grad()
753
+ def run_wer(self, mel: Tensor) -> List[List[str]]:
754
+ self.decoder.reset()
755
+ tokenizer: Tokenizer = self.tokenizer
756
+ n_audio: int = mel.shape[0]
757
+
758
+ audio_features: Tensor = self._get_audio_features(mel) # encoder forward pass
759
+ tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1)
760
+
761
+ # detect language if requested, overwriting the language token
762
+ languages, language_probs = self._detect_language(audio_features, tokens)
763
+ if self.options.task == "lang_id":
764
+ return [
765
+ DecodingResult(audio_features=features, language=language, language_probs=probs)
766
+ for features, language, probs in zip(audio_features, languages, language_probs)
767
+ ]
768
+
769
+ # repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
770
+ audio_features = audio_features.repeat_interleave(self.n_group, dim=0)
771
+ tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)
772
+
773
+ # call the main sampling loop
774
+ tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)
775
+
776
+ # reshape the tensors to have (n_audio, n_group) as the first two dimensions
777
+ audio_features = audio_features[:: self.n_group]
778
+ no_speech_probs = no_speech_probs[:: self.n_group]
779
+ assert audio_features.shape[0] == len(no_speech_probs) == n_audio
780
+
781
+ tokens = tokens.reshape(n_audio, self.n_group, -1)
782
+ sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group)
783
+
784
+ # get the final candidates for each group, and slice between the first sampled token and EOT
785
+ tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
786
+ tokens: List[List[Tensor]] = [
787
+ [t[self.sample_begin: (t == tokenizer.eot).nonzero()[0, 0]] for t in s] for s in tokens
788
+ ]
789
+
790
+ # select the top-ranked sample in each group
791
+ # selected = self.sequence_ranker.rank(tokens, sum_logprobs)
792
+ # print(f'selected = {selected}\n')
793
+
794
+ tokens = [[i.tolist() for i in t] for t in tokens]
795
+ avg_logprobs: List[float] = [[ilp / (len(it) + 1) for it, ilp in zip(t, lp)] for t, lp in
796
+ zip(tokens, sum_logprobs)]
797
+ texts = [[tokenizer.decode(i).strip() for i in t] for t in tokens]
798
+
799
+ # print(f'tokens = {tokens}')
800
+ # print(f'avg_logprobs = {avg_logprobs}')
801
+ # print(f'texts = {texts}')
802
+
803
+ import heapq
804
+ top5_list = [heapq.nlargest(5, range(len(a)), a.__getitem__) for a in avg_logprobs]
805
+ # print(f'top5_list = {top5_list}')
806
+
807
+ texts = [[text[i] for i in top5] for text, top5 in zip(texts, top5_list)]
808
+
809
+ return texts
810
+
811
+ @torch.no_grad()
812
+ def run_score(self, mel: Tensor) -> Tuple[List[List[str]], List[List[float]]]:
813
+ self.decoder.reset()
814
+ tokenizer: Tokenizer = self.tokenizer
815
+ n_audio: int = mel.shape[0]
816
+
817
+ audio_features: Tensor = self._get_audio_features(mel) # encoder forward pass
818
+ tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1)
819
+
820
+ # detect language if requested, overwriting the language token
821
+ languages, language_probs = self._detect_language(audio_features, tokens)
822
+ if self.options.task == "lang_id":
823
+ return [
824
+ DecodingResult(audio_features=features, language=language, language_probs=probs)
825
+ for features, language, probs in zip(audio_features, languages, language_probs)
826
+ ]
827
+
828
+ # repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
829
+ audio_features = audio_features.repeat_interleave(self.n_group, dim=0)
830
+ tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)
831
+
832
+ # call the main sampling loop
833
+ tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)
834
+
835
+ # reshape the tensors to have (n_audio, n_group) as the first two dimensions
836
+ audio_features = audio_features[:: self.n_group]
837
+ no_speech_probs = no_speech_probs[:: self.n_group]
838
+ assert audio_features.shape[0] == len(no_speech_probs) == n_audio
839
+
840
+ tokens = tokens.reshape(n_audio, self.n_group, -1)
841
+ sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group)
842
+
843
+ # get the final candidates for each group, and slice between the first sampled token and EOT
844
+ tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs)
845
+ tokens: List[List[Tensor]] = [
846
+ [t[self.sample_begin: (t == tokenizer.eot).nonzero()[0, 0]] for t in s] for s in tokens
847
+ ]
848
+
849
+ # select the top-ranked sample in each group
850
+ # selected = self.sequence_ranker.rank(tokens, sum_logprobs)
851
+ # print(f'selected = {selected}\n')
852
+
853
+ tokens = [[i.tolist() for i in t] for t in tokens]
854
+ avg_logprobs: List[float] = [[ilp / (len(it) + 1) for it, ilp in zip(t, lp)] for t, lp in
855
+ zip(tokens, sum_logprobs)]
856
+ texts = [[tokenizer.decode(i).strip() for i in t] for t in tokens]
857
+
858
+ # print(f'tokens = {tokens}')
859
+ # print(f'avg_logprobs = {avg_logprobs}')
860
+ # print(f'texts = {texts}')
861
+
862
+ import heapq
863
+ topk_list = [heapq.nlargest(80, range(len(a)), a.__getitem__) for a in avg_logprobs]  # keep the 80 best-scoring hypotheses
864
+ # print(f'topk_list = {topk_list}')
865
+
866
+ texts = [[text[i] for i in topk] for text, topk in zip(texts, topk_list)]
867
+ scores = [[score[i] for i in topk] for score, topk in zip(avg_logprobs, topk_list)]
868
+
869
+ return texts, scores
870
+
871
+ @torch.no_grad()
872
+ def decode(
873
+ model: "Whisper",
874
+ mel: Tensor,
875
+ options: DecodingOptions = DecodingOptions(),
876
+ **kwargs,
877
+ ) -> Union[DecodingResult, List[DecodingResult]]:
878
+ """
879
+ Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
880
+
881
+ Parameters
882
+ ----------
883
+ model: Whisper
884
+ the Whisper model instance
885
+
886
+ mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
887
+ A tensor containing the Mel spectrogram(s)
888
+
889
+ options: DecodingOptions
890
+ A dataclass that contains all necessary options for decoding 30-second segments
891
+
892
+ Returns
893
+ -------
894
+ result: Union[DecodingResult, List[DecodingResult]]
895
+ The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
896
+ """
897
+ if single := mel.ndim == 2:
898
+ mel = mel.unsqueeze(0)
899
+
900
+ if kwargs:
901
+ options = replace(options, **kwargs)
902
+
903
+ result = DecodingTask(model, options).run(mel)
904
+
905
+ return result[0] if single else result
906
+
907
+ @torch.no_grad()
908
+ def decode1(model: "Whisper", mel: Tensor, options: DecodingOptions = DecodingOptions()) -> Union[List[str], List[List[str]]]:
909
+ """
910
+ Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
911
+
912
+ Parameters
913
+ ----------
914
+ model: Whisper
915
+ the Whisper model instance
916
+
917
+ mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
918
+ A tensor containing the Mel spectrogram(s)
919
+
920
+ options: DecodingOptions
921
+ A dataclass that contains all necessary options for decoding 30-second segments
922
+
923
+ Returns
924
+ -------
925
+ result: Union[List[str], List[List[str]]]
926
+ The top-ranked hypothesis texts: a list per audio segment, or a single list when one mel is given
927
+ """
928
+ single = mel.ndim == 2
929
+ if single:
930
+ mel = mel.unsqueeze(0)
931
+
932
+ result = DecodingTask(model, options).run1(mel)
933
+
934
+ if single:
935
+ result = result[0]
936
+
937
+ return result
938
+
939
+
940
+ @torch.no_grad()
941
+ def decode_wer(model: "Whisper", mel: Tensor, options: DecodingOptions = DecodingOptions()) -> Union[
942
+ List[str], List[List[str]]]:
943
+ """
944
+ Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
945
+
946
+ Parameters
947
+ ----------
948
+ model: Whisper
949
+ the Whisper model instance
950
+
951
+ mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
952
+ A tensor containing the Mel spectrogram(s)
953
+
954
+ options: DecodingOptions
955
+ A dataclass that contains all necessary options for decoding 30-second segments
956
+
957
+ Returns
958
+ -------
959
+ result: Union[List[str], List[List[str]]]
960
+ The top-5 hypothesis texts per audio segment (a single list when one mel is given)
961
+ """
962
+ single = mel.ndim == 2
963
+ if single:
964
+ mel = mel.unsqueeze(0)
965
+
966
+ result = DecodingTask(model, options).run_wer(mel)
967
+
968
+ if single:
969
+ result = result[0]
970
+
971
+ return result
972
+
973
+
974
+ @torch.no_grad()
975
+ def decode_score(model: "Whisper", mel: Tensor, options: DecodingOptions = DecodingOptions()) -> Tuple[
976
+ List[List[str]], List[List[float]]]:
977
+ """
978
+ Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
979
+
980
+ Parameters
981
+ ----------
982
+ model: Whisper
983
+ the Whisper model instance
984
+
985
+ mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
986
+ A tensor containing the Mel spectrogram(s)
987
+
988
+ options: DecodingOptions
989
+ A dataclass that contains all necessary options for decoding 30-second segments
990
+
991
+ Returns
992
+ -------
993
+ texts, scores : Tuple[List[List[str]], List[List[float]]]
994
+ The ranked hypothesis texts and their average log probabilities, per audio segment (inner lists when a single mel is given)
995
+ """
996
+ single = mel.ndim == 2
997
+ if single:
998
+ mel = mel.unsqueeze(0)
999
+
1000
+ texts, scores = DecodingTask(model, options).run_score(mel)
1001
+
1002
+ if single:
1003
+ texts = texts[0]
1004
+ scores = scores[0]
1005
+
1006
+ return texts, scores
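A hedged usage sketch for the added entry points (`decode1`, `decode_wer`, `decode_score`), assuming they are re-exported from the package `__init__` the same way `decode` is; the model size and file name are placeholders:

```python
import whisper

model = whisper.load_model("large-v2")
audio = whisper.pad_or_trim(whisper.load_audio("sample.wav"))
mel = whisper.log_mel_spectrogram(audio).to(model.device)
options = whisper.DecodingOptions(language="en", beam_size=50)

texts, scores = whisper.decode_score(model, mel, options)  # ranked hypotheses + avg logprobs
top50_texts = whisper.decode1(model, mel, options)         # up to 50 hypothesis texts
top5_texts = whisper.decode_wer(model, mel, options)       # up to 5 hypothesis texts
```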
generate_robust_hp.py ADDED
@@ -0,0 +1,212 @@
1
+ import whisper
2
+ import re
3
+ import sys
4
+ import os, random, copy
5
+ import numpy as np
6
+ import torch
7
+ import pandas as pd
8
+ import torchaudio
9
+ from tqdm.notebook import tqdm
10
+ import collections, json
11
+ import editdistance
12
+ from whisper.normalizers import EnglishTextNormalizer
13
+ from argparse import ArgumentParser
14
+ from num2words import num2words
15
+ sys.path.append('/home3/huyuchen/pytorch_workplace/jiwer')
16
+ from jiwer import wer_embdiff
17
+ import fasttext
18
+ from huggingface_hub import hf_hub_download
19
+ from pathlib import Path
20
+ from typing import Optional
21
+ from sentencepiece import SentencePieceProcessor, SentencePieceTrainer
22
+ from sentence_transformers import SentenceTransformer
23
+ from argparse import ArgumentParser
24
+ from evaluate import load
25
+ from lit_gpt.tokenizer import Tokenizer
26
+ eval_wer = load("wer")
27
+ normalizer = EnglishTextNormalizer()
28
+
29
+ checkpoint_dir = Path('/home3/huyuchen/pytorch_workplace/wgpt/checkpoints/Llama-2-7b-hf')
30
+ tokenizer = Tokenizer(checkpoint_dir)
31
+
32
+ sbert_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
33
+
34
+
35
+ def calculate_wer(all_hypo, all_refer):
36
+ return eval_wer.compute(predictions=all_hypo, references=all_refer)
37
+
38
+ def word_emb_diff(reference, hypothesis):
39
+ output, edit_ops = wer_embdiff(reference, hypothesis)
40
+ ref_words, hypo_words = output.references[0], output.hypotheses[0]
41
+
42
+ emb_diffs = []
43
+ for op in edit_ops:
44
+ if op.tag == 'replace':
45
+ ref_word, hypo_word = ref_words[op.src_pos], hypo_words[op.dest_pos]
46
+ elif op.tag == 'delete':
47
+ ref_word, hypo_word = ref_words[op.src_pos], None
48
+ elif op.tag == 'insert':
49
+ ref_word, hypo_word = None, hypo_words[op.dest_pos]
50
+ else:
51
+ continue
52
+
53
+ ref_emb = torch.from_numpy(sbert_model.encode([ref_word])[0]) if ref_word else torch.zeros([384])
54
+ hypo_emb = torch.from_numpy(sbert_model.encode([hypo_word])[0]) if hypo_word else torch.zeros([384])
55
+
56
+ emb_diff = ref_emb - hypo_emb
57
+ emb_diffs.append(emb_diff)
58
+
59
+ # print('word', hypo_emb.mean(), ref_emb.mean(), emb_diff.mean())
60
+
61
+ if len(emb_diffs) == 0:
62
+ return torch.zeros([384])
63
+ else:
64
+ return torch.stack(emb_diffs, dim=0).mean(dim=0)
65
+
66
+ def sent_emb_diff(reference, hypothesis):
67
+ embeddings = sbert_model.encode([reference, hypothesis])
68
+ ref_emb, hypo_emb = torch.from_numpy(embeddings[0]), torch.from_numpy(embeddings[1])
69
+ emb_diff = ref_emb - hypo_emb
70
+ # print('sentence', hypo_emb.mean(), ref_emb.mean(), emb_diff.mean())
71
+
72
+ return emb_diff
73
+
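A toy illustration of the two helpers above, which both return 384-dimensional SBERT difference vectors (the sentences are made up):

```python
ref, hyp = "turn left at the bridge", "turn left at the fridge"
we = word_emb_diff(ref, hyp)   # mean SBERT difference over edit-aligned word pairs, shape [384]
se = sent_emb_diff(ref, hyp)   # sentence-level SBERT difference, shape [384]
print(we.shape, se.shape)      # torch.Size([384]) torch.Size([384])
```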
74
+ def generate_prompt(input1, input2):
75
+ return (
76
+ f"Below is the best-hypotheses transcribed from speech recognition system. Please try to revise it using the words which are only included into other-hypothesis, and write the response for the true transcription.\n\n### Best-hypothesis:\n{input1}\n\n### Other-hypothesis:\n{input2}\n\n### Response:\n"
77
+ )
78
+
79
+
80
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
81
+ model = whisper.load_model('large-v2')
82
+
83
+ f_noisy_wav = open(f'noisy_wav.scp', 'r')
84
+ f_clean_wav = open(f'clean_wav.scp', 'r')
85
+ f_text = open(f'text', 'r')
86
+
87
+ id = 0
88
+ pt_file = []
89
+ all_hypo, all_refer = [], []
90
+ for line in f_noisy_wav.readlines():
91
+ utt_id, audio_path = line.strip().split()[0], line.strip().split()[1]
92
+ clean_line = f_clean_wav.readline()
93
+ clean_utt_id, clean_audio_path = clean_line.strip().split()[0], clean_line.strip().split()[1]
94
+ assert clean_utt_id == utt_id, (line, clean_line)
95
+ gt = ' '.join(f_text.readline().strip().split()[1:])
96
+ audio = whisper.load_audio(audio_path)
97
+ audio = whisper.pad_or_trim(audio)
98
+ mel = whisper.log_mel_spectrogram(audio).to(model.device)
99
+ options = whisper.DecodingOptions(language='en', beam_size=50)
100
+ texts, confidences = whisper.decode_score(model, mel, options)
101
+
102
+ ## noisy audio feats
103
+ audio_features = model.encoder(mel.unsqueeze(0))[0]
104
+
105
+ ## clean audio feats
106
+ clean_audio = whisper.load_audio(clean_audio_path)
107
+ clean_audio = whisper.pad_or_trim(clean_audio)
108
+ clean_mel = whisper.log_mel_spectrogram(clean_audio).to(model.device)
109
+ clean_audio_features = model.encoder(clean_mel.unsqueeze(0))[0]
110
+
111
+ input, score = [], []
112
+ for text, confidence in zip(texts, confidences):
113
+ if len(input) < 5 and len(text) > 0 and text not in input:
114
+ input.append(text)
115
+ score.append(confidence)
116
+
117
+ # print('before', input, score, len(input))
118
+
119
+ if len(input) < 5:
120
+ options = whisper.DecodingOptions(language='en', temperature=1.2)
121
+ for _ in range(5 - len(input)):
122
+ result = whisper.decode(model, mel, options)
123
+ text, confidence = result.text, result.avg_logprob
124
+ if text in input:
125
+ continue
126
+ inserted = False
127
+ for i in range(len(input)):
128
+ if confidence > score[i]:
129
+ input.insert(i, text)
130
+ score.insert(i, confidence)
131
+ inserted = True
132
+ break
133
+ if not inserted:
134
+ input.append(text)
135
+ score.append(confidence)
136
+
137
+ # print('after ', input, score, len(input))
138
+
139
+ if len(input) < 5:
140
+ num_to_add = 5 - len(input)
141
+ for _ in range(num_to_add):
142
+ rand_id = random.randint(0, len(input) - 1)
143
+ rep_input, rep_score = copy.deepcopy(input[rand_id]), copy.deepcopy(score[rand_id])
144
+ input.insert(rand_id + 1, rep_input)
145
+ score.insert(rand_id + 1, rep_score)
146
+
147
+ for i in range(len(input)):
148
+ try:
149
+ text = normalizer(input[i])
150
+ text = re.sub(r"[-+]?\d*\.?\d+|\d+%?", lambda m: num2words(m.group()), text).replace('%', ' percent')
151
+ except Exception:
152
+ text = normalizer(input[i])
153
+ print(f'input exception: {text}')
154
+ input[i] = text if len(text) > 0 else '<UNK>'
155
+
156
+ try:
157
+ output = normalizer(gt)
158
+ output = re.sub(r"[-+]?\d*\.?\d+|\d+%?", lambda m: num2words(m.group()), output).replace('%', ' percent')
159
+ except Exception:
160
+ output = normalizer(gt)
161
+ print(f'output exception: {output}')
162
+ output = output if len(output) > 0 else '<UNK>'
163
+
164
+ cur_wer = calculate_wer([input[0]], [output])
165
+
166
+ # calculate emb diff
167
+ we_diffs, se_diffs = [], []
168
+ for i in range(5):
169
+ for j in range(i + 1, 5):
170
+ we_diffs.append(word_emb_diff(input[i], input[j]))
171
+ se_diffs.append(sent_emb_diff(input[i], input[j]))
172
+
173
+ we_diff = torch.stack(we_diffs, dim=0) # [10, 384]
174
+ se_diff = torch.stack(se_diffs, dim=0) # [10, 384]
175
+ emb_diff = torch.cat([we_diff, se_diff], dim=0) # [20, 384]
176
+
177
+ # generate ids
178
+ input1 = input[0] + '.'
179
+ input2 = '. '.join(input[1:]) + '.'
180
+
181
+ full_prompt = generate_prompt(input1, input2)
182
+ full_prompt_and_response = full_prompt + output
183
+ encoded_full_prompt = tokenizer.encode(full_prompt, max_length=1024)
184
+ encoded_full_prompt_and_response = tokenizer.encode(full_prompt_and_response, eos=True, max_length=1024)
185
+
186
+ labels = encoded_full_prompt_and_response.clone()
187
+ labels[: len(encoded_full_prompt)] = -1
188
+
189
+
190
+ data = {"id": utt_id, "input_ids": encoded_full_prompt_and_response, "input_ids_no_response": encoded_full_prompt, "labels": labels,
191
+ "input": input, 'ground_truth': output, "am_score": score, 'emb_diff': emb_diff, 'audio_features': noisy_audio_features,
192
+ 'clean_audio_features': clean_audio_features}
193
+
194
+ pt_file.append(data)
195
+
196
+ # calculate wer
197
+ id += 1
198
+ print(f'utterance {id}: wer = {cur_wer}, confidence = {score[0]}')
199
+ all_hypo.append(input[0])
200
+ all_refer.append(output)
201
+
202
+
203
+ torch.save(pt_file, f'/home3/huyuchen/pytorch_workplace/wllama/hypo_paradise_v2/train_rats.pt')
204
+
205
+
206
+ f_noisy_wav.close()
207
+ f_clean_wav.close()
208
+ f_text.close()
209
+
210
+ all_wer = calculate_wer(all_hypo, all_refer)
211
+ print(f'all wer = {all_wer}')
212
+
jiwer/.github/workflows/pythonpackage.yml ADDED
@@ -0,0 +1,63 @@
1
+ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2
+ # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3
+
4
+ name: Python package
5
+
6
+ on:
7
+ push:
8
+ branches: [ master ]
9
+ pull_request:
10
+ branches: [ master ]
11
+
12
+ jobs:
13
+ style:
14
+ runs-on: ubuntu-latest
15
+ strategy:
16
+ matrix:
17
+ python-version: [3.7]
18
+
19
+ steps:
20
+ - uses: actions/checkout@v3
21
+ - name: Set up Python ${{ matrix.python-version }}
22
+ uses: actions/setup-python@v4
23
+ with:
24
+ python-version: ${{ matrix.python-version }}
25
+ - name: Install Poetry
26
+ uses: snok/install-poetry@v1
27
+ - name: Setup dependencies
28
+ run: |
29
+ poetry update
30
+ poetry install
31
+ - name: Lint with flake8
32
+ run: |
33
+ # stop the build if there are Python syntax errors or undefined names
34
+ poetry run flake8 jiwer --count --select=E9,F63,F7,F82 --show-source --statistics
35
+ # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
36
+ poetry run flake8 jiwer --count --exit-zero --max-complexity=10 --max-line-length=88 --statistics
37
+ - name: Check formatting with black
38
+ run: |
39
+ poetry run black . --check
40
+
41
+ build:
42
+ runs-on: ubuntu-latest
43
+ strategy:
44
+ matrix:
45
+ python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
46
+
47
+ steps:
48
+ - uses: actions/checkout@v3
49
+ - name: Set up Python ${{ matrix.python-version }}
50
+ uses: actions/setup-python@v4
51
+ with:
52
+ python-version: ${{ matrix.python-version }}
53
+ - name: Install Poetry
54
+ uses: snok/install-poetry@v1
55
+ - name: Install dependencies
56
+ run: |
57
+ poetry run python -m pip install --upgrade pip
58
+ poetry update
59
+ poetry install
60
+ - name: Test with pytest
61
+ run: |
62
+ poetry run python --version
63
+ poetry run pytest
jiwer/.github/workflows/pythonpublish.yml ADDED
@@ -0,0 +1,33 @@
1
+ # This workflows will upload a Python Package using Twine when a release is created
2
+ # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3
+
4
+ name: Upload Python Package
5
+
6
+ on:
7
+ release:
8
+ types: [created]
9
+
10
+ jobs:
11
+ deploy:
12
+
13
+ runs-on: ubuntu-latest
14
+
15
+ steps:
16
+ - uses: actions/checkout@v3
17
+ - name: Set up Python
18
+ uses: actions/setup-python@v4
19
+ with:
20
+ python-version: '3.x'
21
+ - name: Install Poetry
22
+ uses: snok/install-poetry@v1
23
+ - name: Install dependencies
24
+ run: |
25
+ poetry run python -m pip install --upgrade pip
26
+ poetry update
27
+ poetry install
28
+ - name: Build and publish
29
+ env:
30
+ POETRY_HTTP_BASIC_PYPI_USERNAME: ${{ secrets.PYPI_USERNAME }}
31
+ POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_API_TOKEN }}
32
+ run: |
33
+ poetry publish --build
jiwer/.gitignore ADDED
@@ -0,0 +1,28 @@
1
+ # Compiled python modules.
2
+ *.pyc
3
+
4
+ # Setuptools distribution folder.
5
+ /dist/
6
+
7
+ # Python egg metadata, regenerated from source files by setuptools.
8
+ /*.egg-info
9
+
10
+ # playground directory for running local debug code
11
+ playground
12
+
13
+ # poetry .lock file
14
+ poetry.lock
15
+
16
+ # idea specific folders
17
+ .idea
18
+ .vscode
19
+
20
+ # virtual environments
21
+ venv
22
+ .venv
23
+
24
+ # cache folders
25
+ .pytest_cache
26
+ .benchmarks
27
+ /docs/site/
28
+ /site/
jiwer/.mailmap ADDED
@@ -0,0 +1,6 @@
1
+ # mail map file
2
+ # for cleaner output of e.g. `git shortlog -nes`
3
+
4
+ Nik Vaessen <[email protected]>
5
6
+ Bart van Andel <[email protected]>
jiwer/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright 2018 8x8, Inc.
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
jiwer/README.md ADDED
@@ -0,0 +1,55 @@
1
+ # JiWER
2
+
3
+ JiWER is a simple and fast python package to evaluate an automatic speech recognition system.
4
+ It supports the following measures:
5
+
6
+ 1. word error rate (WER)
7
+ 2. match error rate (MER)
8
+ 3. word information lost (WIL)
9
+ 4. word information preserved (WIP)
10
+ 5. character error rate (CER)
11
+
12
+ These measures are computed with the use of the minimum-edit distance between one or more reference and hypothesis sentences.
13
+ The minimum-edit distance is calculated using [RapidFuzz](https://github.com/maxbachmann/RapidFuzz), which uses C++ under the hood, and is therefore faster than a pure python implementation.
14
+
15
+ ## Documentation
16
+
17
+ For further info, see the documentation at [jitsi.github.io/jiwer](https://jitsi.github.io/jiwer).
18
+
19
+ ## Installation
20
+
21
+ You should be able to install this package using [poetry](https://python-poetry.org/docs/):
22
+
23
+ ```
24
+ $ poetry add jiwer
25
+ ```
26
+
27
+ Or, if you prefer old-fashioned pip and you're using Python >= `3.7`:
28
+
29
+ ```bash
30
+ $ pip install jiwer
31
+ ```
32
+
33
+ ## Usage
34
+
35
+ The simplest use case is computing the word error rate between two strings:
36
+
37
+ ```python
38
+ from jiwer import wer
39
+
40
+ reference = "hello world"
41
+ hypothesis = "hello duck"
42
+
43
+ error = wer(reference, hypothesis)
44
+ ```
45
+
46
+ ## Licence
47
+
48
+ The jiwer package is released under the `Apache License, Version 2.0` licence by [8x8](https://www.8x8.com/).
49
+
50
+ For further information, see [`LICENCE`](./LICENSE).
51
+
52
+ ## Reference
53
+
54
+ _For a comparison between WER, MER and WIL, see: \
55
+ Morris, Andrew & Maier, Viktoria & Green, Phil. (2004). [From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.](https://www.researchgate.net/publication/221478089_From_WER_and_RIL_to_MER_and_WIL_improved_evaluation_measures_for_connected_speech_recognition)_
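For readers skimming this diff, a minimal sketch of computing each of the five measures listed in the README above with the public API (the sentences are made-up toy data, not taken from the package):

```python
# Toy sketch: one call per measure named in the README (WER, MER, WIL, WIP, CER).
# Assumes jiwer is installed; the example strings are arbitrary.
import jiwer

reference = ["the quick brown fox", "jumps over the lazy dog"]
hypothesis = ["the quick brown box", "jumps over a lazy dog today"]

print("wer:", jiwer.wer(reference, hypothesis))
print("mer:", jiwer.mer(reference, hypothesis))
print("wil:", jiwer.wil(reference, hypothesis))
print("wip:", jiwer.wip(reference, hypothesis))
print("cer:", jiwer.cer(reference, hypothesis))
```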
jiwer/docs/cli.md ADDED
@@ -0,0 +1,29 @@
1
+
2
+ JiWER provides a simple CLI, which should be available after installation.
3
+
4
+ For details, see `jiwer --help`.
5
+
6
+ ```text
7
+ $ jiwer --help
8
+ Usage: jiwer [OPTIONS]
9
+
10
+ JiWER is a python tool for computing the word-error-rate of ASR systems. To
11
+ use this CLI, store the reference and hypothesis sentences in a text file,
12
+ where each sentence is delimited by a new-line character. The text files are
13
+ expected to have an equal number of lines, unless the `-g` flag is used. The
14
+ `-g` flag joins computation of the WER by doing a global minimal alignment.
15
+
16
+ Options:
17
+ -r, --reference PATH Path to new-line delimited text file of reference
18
+ sentences. [required]
19
+ -h, --hypothesis PATH Path to new-line delimited text file of hypothesis
20
+ sentences. [required]
21
+ -c, --cer Compute CER instead of WER.
22
+ -a, --align Print alignment of each sentence.
23
+ -g, --global Apply a global minimal alignment between reference
24
+ and hypothesis sentences before computing the WER.
25
+ --help Show this message and exit.
26
+ ```
27
+
28
+ Note that the CLI does not support custom pre-processing (as described below). Any pre-processing
29
+ should be done on the text files manually before calling JiWER when using the CLI.
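A small, hedged sketch of the workflow the CLI documentation above describes: write new-line delimited reference and hypothesis files, then invoke the installed `jiwer` entry point with the documented flags (the file names here are placeholders):

```python
# Sketch only: prepares two new-line delimited files and calls the CLI flags
# documented above (-r/-h, plus -a for alignment and -c for CER). Assumes the
# `jiwer` console script is on PATH after installation.
import pathlib
import subprocess

pathlib.Path("ref.txt").write_text("hello world\ni like python\n")
pathlib.Path("hyp.txt").write_text("hello duck\ni like a python\n")

subprocess.run(["jiwer", "-r", "ref.txt", "-h", "hyp.txt"], check=True)        # plain WER
subprocess.run(["jiwer", "-r", "ref.txt", "-h", "hyp.txt", "-a"], check=True)  # with alignment
subprocess.run(["jiwer", "-r", "ref.txt", "-h", "hyp.txt", "-c"], check=True)  # CER instead
```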
jiwer/docs/gen_ref_pages.py ADDED
@@ -0,0 +1,28 @@
1
+ """Generate the code reference pages and navigation."""
2
+
3
+ from pathlib import Path
4
+ import mkdocs_gen_files
5
+
6
+ nav = mkdocs_gen_files.Nav()
7
+
8
+ for path in sorted(Path("jiwer").rglob("*.py")):
9
+ doc_path = path.relative_to("jiwer").with_suffix(".md")
10
+ full_doc_path = Path("reference", doc_path)
11
+
12
+ module_path = path.relative_to("jiwer").with_suffix("")
13
+ parts = list(module_path.parts)
14
+
15
+ if parts[-1] == "__init__" or parts[-1] == "cli":
16
+ continue
17
+
18
+ nav[parts] = doc_path.as_posix()
19
+
20
+ with mkdocs_gen_files.open(full_doc_path, "w") as fd:
21
+ identifier = ".".join(parts)
22
+ print("::: " + identifier, file=fd)
23
+
24
+ mkdocs_gen_files.set_edit_path(full_doc_path, path)
25
+
26
+
27
+ with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
28
+ nav_file.writelines(nav.build_literate_nav())
jiwer/docs/index.md ADDED
@@ -0,0 +1,31 @@
1
+ # JiWER
2
+
3
+ JiWER is a simple and fast python package to evaluate an automatic speech recognition system.
4
+ It supports the following measures:
5
+
6
+ 1. word error rate (WER)
7
+ 2. match error rate (MER)
8
+ 3. word information lost (WIL)
9
+ 4. word information preserved (WIP)
10
+ 5. character error rate (CER)
11
+
12
+ These measures are computed with the use of the minimum-edit distance between one or more reference and hypothesis sentences.
13
+ The minimum-edit distance is calculated using [RapidFuzz](https://github.com/maxbachmann/RapidFuzz), which uses C++ under the hood, and is therefore faster than a pure python implementation.
14
+
15
+ # Installation
16
+
17
+ You should be able to install this package using [poetry](https://python-poetry.org/docs/):
18
+
19
+ ```
20
+ $ poetry add jiwer
21
+ ```
22
+
23
+ Or, if you prefer old-fashioned pip and you're using Python >= `3.7`:
24
+
25
+ ```bash
26
+ $ pip install jiwer
27
+ ```
28
+
29
+
30
+
31
+
jiwer/docs/requirements.txt ADDED
@@ -0,0 +1,5 @@
1
+ mkdocs==1.4.2
2
+ mkdocstrings[python]==0.20.0
3
+ mkdocs-gen-files==0.4.0
4
+ mkdocs-literate-nav==0.6.0
5
+ mkdocs-material==9.1.3
jiwer/docs/usage.md ADDED
@@ -0,0 +1,111 @@
1
+ # Usage
2
+
3
+ The simplest use case is computing the word error rate between two strings:
4
+
5
+ ```python
6
+ from jiwer import wer
7
+
8
+ reference = "hello world"
9
+ hypothesis = "hello duck"
10
+
11
+ error = wer(reference, hypothesis)
12
+ ```
13
+
14
+ Similarly, to get other measures:
15
+
16
+ ```python
17
+ import jiwer
18
+
19
+ reference = "hello world"
20
+ hypothesis = "hello duck"
21
+
22
+ wer = jiwer.wer(reference, hypothesis)
23
+ mer = jiwer.mer(reference, hypothesis)
24
+ wil = jiwer.wil(reference, hypothesis)
25
+
26
+ # faster, because `process_words` only needs to perform the heavy lifting once:
27
+ output = jiwer.process_words(reference, hypothesis)
28
+ wer = output.wer
29
+ mer = output.mer
30
+ wil = output.wil
31
+ ```
32
+
33
+ You can also compute the WER over multiple sentences:
34
+
35
+ ```python
36
+ from jiwer import wer
37
+
38
+ reference = ["hello world", "i like monthy python"]
39
+ hypothesis = ["hello duck", "i like python"]
40
+
41
+ error = wer(reference, hypothesis)
42
+ ```
43
+
44
+ We also provide the character error rate:
45
+
46
+ ```python
47
+ import jiwer
48
+
49
+ reference = ["i can spell", "i hope"]
50
+ hypothesis = ["i kan cpell", "i hop"]
51
+
52
+ error = jiwer.cer(reference, hypothesis)
53
+
54
+ # if you also want the alignment
55
+ output = jiwer.process_characters(reference, hypothesis)
56
+ error = output.cer
57
+ ```
58
+
59
+ # Alignment
60
+
61
+ With `jiwer.process_words`, you also get the alignment between the reference and hypothesis.
62
+
63
+ We provide the alignment as a list of `(op, ref_start_idx, ref_end_idx, hyp_start_idx, hyp_end_idx)`, where `op` is one of
64
+ `equal`, `substitute`, `delete`, or `insert`.
65
+
66
+ This looks like the following:
67
+
68
+ ```python3
69
+ import jiwer
70
+
71
+ out = jiwer.process_words("short one here", "shoe order one")
72
+ print(out.alignments)
73
+ # [[AlignmentChunk(type='insert', ref_start_idx=0, ref_end_idx=0, hyp_start_idx=0, hyp_end_idx=1), ...]]
74
+ ```
75
+
76
+ To visualize the alignment, you can use `jiwer.visualize_alignment()`
77
+
78
+ For example:
79
+
80
+ ```python3
81
+ import jiwer
82
+
83
+ out = jiwer.process_words(
84
+ ["short one here", "quite a bit of longer sentence"],
85
+ ["shoe order one", "quite bit of an even longest sentence here"],
86
+ )
87
+
88
+ print(jiwer.visualize_alignment(out))
89
+ ```
90
+ Gives the following output
91
+ ```text
92
+ sentence 1
93
+ REF: **** short one here
94
+ HYP: shoe order one ****
95
+ I S D
96
+
97
+ sentence 2
98
+ REF: quite a bit of ** **** longer sentence ****
99
+ HYP: quite * bit of an even longest sentence here
100
+ D I I S I
101
+
102
+ number of sentences: 2
103
+ substitutions=2 deletions=2 insertions=4 hits=5
104
+
105
+ mer=61.54%
106
+ wil=74.75%
107
+ wip=25.25%
108
+ wer=88.89%
109
+ ```
110
+
111
+ Note that it is also possible to visualize the character-level alignment; simply use the output of `jiwer.process_characters()` instead.
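For completeness, a short sketch of the character-level variant mentioned in the note above (toy strings; the output has the same shape as the word-level visualization):

```python
# Sketch: character-level processing and visualization, mirroring the
# word-level examples above. The input strings are arbitrary toy data.
import jiwer

out = jiwer.process_characters("short one here", "shoe order one")
print("cer:", out.cer)
print(jiwer.visualize_alignment(out))
```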
jiwer/jiwer/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ from .measures import *
2
+ from .transforms import *
3
+ from .transformations import *
4
+ from .alignment import *
5
+ from .process import *
6
+
7
+ name = "jiwer"
jiwer/jiwer/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (281 Bytes).
 
jiwer/jiwer/__pycache__/alignment.cpython-38.pyc ADDED
Binary file (4.97 kB).
 
jiwer/jiwer/__pycache__/measures.cpython-38.pyc ADDED
Binary file (11.2 kB).
 
jiwer/jiwer/__pycache__/process.cpython-38.pyc ADDED
Binary file (12.2 kB).
 
jiwer/jiwer/__pycache__/transformations.cpython-38.pyc ADDED
Binary file (1.19 kB).
 
jiwer/jiwer/__pycache__/transforms.cpython-38.pyc ADDED
Binary file (21.8 kB).
 
jiwer/jiwer/alignment.py ADDED
@@ -0,0 +1,185 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+
19
+ """
20
+ Utility method to visualize the alignment between one or more reference and hypothesis
21
+ pairs.
22
+ """
23
+
24
+ from typing import Dict, List, Tuple, Union
25
+
26
+
27
+ from jiwer.process import CharacterOutput, WordOutput, AlignmentChunk
28
+
29
+ __all__ = ["visualize_alignment"]
30
+
31
+
32
+ def visualize_alignment(
33
+ output: Union[WordOutput, CharacterOutput],
34
+ show_measures: bool = True,
35
+ skip_correct: bool = True,
36
+ ) -> str:
37
+ """
38
+ Visualize the output of [jiwer.process_words][process.process_words] and
39
+ [jiwer.process_characters][process.process_characters]. The visualization
40
+ shows the alignment between each processed reference and hypothesis pair.
41
+ If `show_measures=True`, the output string will also contain all measures in the
42
+ output.
43
+
44
+ Args:
45
+ output: The processed output of reference and hypothesis pair(s).
46
+ show_measures: If enabled, the visualization will include measures like the WER
47
+ or CER
48
+ skip_correct: If enabled, the visualization will exclude correct reference and hypothesis pairs
49
+
50
+ Returns:
51
+ (str): The visualization as a string
52
+
53
+ Example:
54
+ This code snippet
55
+ ```python
56
+ import jiwer
57
+
58
+ out = jiwer.process_words(
59
+ ["short one here", "quite a bit of longer sentence"],
60
+ ["shoe order one", "quite bit of an even longest sentence here"],
61
+ )
62
+
63
+ print(jiwer.visualize_alignment(out))
64
+ ```
65
+ will produce this visualization:
66
+ ```txt
67
+ sentence 1
68
+ REF: # short one here
69
+ HYP: shoe order one *
70
+ I S D
71
+
72
+ sentence 2
73
+ REF: quite a bit of # # longer sentence #
74
+ HYP: quite * bit of an even longest sentence here
75
+ D I I S I
76
+
77
+ number of sentences: 2
78
+ substitutions=2 deletions=2 insertions=4 hits=5
79
+
80
+ mer=61.54%
81
+ wil=74.75%
82
+ wip=25.25%
83
+ wer=88.89%
84
+ ```
85
+
86
+ When `show_measures=False`, only the alignment will be printed:
87
+
88
+ ```txt
89
+ sentence 1
90
+ REF: # short one here
91
+ HYP: shoe order one *
92
+ I S D
93
+
94
+ sentence 2
95
+ REF: quite a bit of # # longer sentence #
96
+ HYP: quite * bit of an even longest sentence here
97
+ D I I S I
98
+ ```
99
+ """
100
+ references = output.references
101
+ hypothesis = output.hypotheses
102
+ alignment = output.alignments
103
+ is_cer = isinstance(output, CharacterOutput)
104
+
105
+ final_str = ""
106
+ for idx, (gt, hp, chunks) in enumerate(zip(references, hypothesis, alignment)):
107
+ if skip_correct and len(chunks) == 1 and chunks[0].type == "equal":
108
+ continue
109
+
110
+ final_str += f"sentence {idx+1}\n"
111
+ final_str += _construct_comparison_string(
112
+ gt, hp, chunks, include_space_seperator=not is_cer
113
+ )
114
+ final_str += "\n"
115
+
116
+ if show_measures:
117
+ final_str += f"number of sentences: {len(alignment)}\n"
118
+ final_str += f"substitutions={output.substitutions} "
119
+ final_str += f"deletions={output.deletions} "
120
+ final_str += f"insertions={output.insertions} "
121
+ final_str += f"hits={output.hits}\n"
122
+
123
+ if is_cer:
124
+ final_str += f"\ncer={output.cer*100:.2f}%\n"
125
+ else:
126
+ final_str += f"\nmer={output.mer*100:.2f}%"
127
+ final_str += f"\nwil={output.wil*100:.2f}%"
128
+ final_str += f"\nwip={output.wip*100:.2f}%"
129
+ final_str += f"\nwer={output.wer*100:.2f}%\n"
130
+ else:
131
+ # remove last newline
132
+ final_str = final_str[:-1]
133
+
134
+ return final_str
135
+
136
+
137
+ def _construct_comparison_string(
138
+ reference: List[str],
139
+ hypothesis: List[str],
140
+ ops: List[AlignmentChunk],
141
+ include_space_seperator: bool = False,
142
+ ) -> str:
143
+ ref_str = "REF: "
144
+ hyp_str = "HYP: "
145
+ op_str = " "
146
+
147
+ for op in ops:
148
+ if op.type == "equal" or op.type == "substitute":
149
+ ref = reference[op.ref_start_idx : op.ref_end_idx]
150
+ hyp = hypothesis[op.hyp_start_idx : op.hyp_end_idx]
151
+ op_char = " " if op.type == "equal" else "s"
152
+ elif op.type == "delete":
153
+ ref = reference[op.ref_start_idx : op.ref_end_idx]
154
+ hyp = ["*" for _ in range(len(ref))]
155
+ op_char = "d"
156
+ elif op.type == "insert":
157
+ hyp = hypothesis[op.hyp_start_idx : op.hyp_end_idx]
158
+ ref = ["*" for _ in range(len(hyp))]
159
+ op_char = "i"
160
+ else:
161
+ raise ValueError(f"unparseable op name={op.type}")
162
+
163
+ op_chars = [op_char for _ in range(len(ref))]
164
+ for rf, hp, c in zip(ref, hyp, op_chars):
165
+ str_len = max(len(rf), len(hp), len(c))
166
+
167
+ if rf == "*":
168
+ rf = "".join(["*"] * str_len)
169
+ elif hp == "*":
170
+ hp = "".join(["*"] * str_len)
171
+
172
+ ref_str += f"{rf:>{str_len}}"
173
+ hyp_str += f"{hp:>{str_len}}"
174
+ op_str += f"{c.upper():>{str_len}}"
175
+
176
+ if include_space_seperator:
177
+ ref_str += " "
178
+ hyp_str += " "
179
+ op_str += " "
180
+
181
+ if include_space_seperator:
182
+ # remove last space
183
+ return f"{ref_str[:-1]}\n{hyp_str[:-1]}\n{op_str[:-1]}\n"
184
+ else:
185
+ return f"{ref_str}\n{hyp_str}\n{op_str}\n"
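A brief sketch of the two keyword arguments documented in `visualize_alignment` above, `show_measures` and `skip_correct`, applied to toy sentences (not taken from the package's own examples):

```python
# Sketch: toggling the visualization options described in the docstring above.
import jiwer

out = jiwer.process_words(
    ["short one here", "perfect match"],
    ["shoe order one", "perfect match"],
)

# alignment only (no measure summary), and keep the sentence that matched exactly
print(jiwer.visualize_alignment(out, show_measures=False, skip_correct=False))
```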
jiwer/jiwer/cli.py ADDED
@@ -0,0 +1,133 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+
19
+ """
20
+ Provide a simple CLI wrapper for JiWER. The CLI does not support custom transforms.
21
+ """
22
+
23
+ import click
24
+ import pathlib
25
+
26
+ import jiwer
27
+
28
+
29
+ @click.command()
30
+ @click.option(
31
+ "-r",
32
+ "--reference",
33
+ "reference_file",
34
+ type=pathlib.Path,
35
+ required=True,
36
+ help="Path to new-line delimited text file of reference sentences.",
37
+ )
38
+ @click.option(
39
+ "-h",
40
+ "--hypothesis",
41
+ "hypothesis_file",
42
+ type=pathlib.Path,
43
+ required=True,
44
+ help="Path to new-line delimited text file of hypothesis sentences.",
45
+ )
46
+ @click.option(
47
+ "--cer",
48
+ "-c",
49
+ "compute_cer",
50
+ is_flag=True,
51
+ default=False,
52
+ help="Compute CER instead of WER.",
53
+ )
54
+ @click.option(
55
+ "--align",
56
+ "-a",
57
+ "show_alignment",
58
+ is_flag=True,
59
+ default=False,
60
+ help="Print alignment of each sentence.",
61
+ )
62
+ @click.option(
63
+ "--global",
64
+ "-g",
65
+ "global_alignment",
66
+ is_flag=True,
67
+ default=False,
68
+ help="Apply a global minimal alignment between reference and hypothesis sentences "
69
+ "before computing the WER.",
70
+ )
71
+ def cli(
72
+ reference_file: pathlib.Path,
73
+ hypothesis_file: pathlib.Path,
74
+ compute_cer: bool,
75
+ show_alignment: bool,
76
+ global_alignment: bool,
77
+ ):
78
+ """
79
+ JiWER is a python tool for computing the word-error-rate of ASR systems. To use
80
+ this CLI, store the reference and hypothesis sentences in a text file, where
81
+ each sentence is delimited by a new-line character.
82
+ The text files are expected to have an equal number of lines, unless the `-g` flag
83
+ is used. The `-g` flag joins computation of the WER by doing a global minimal
84
+ alignment.
85
+
86
+ """
87
+ with reference_file.open("r") as f:
88
+ reference_sentences = [
89
+ ln.strip() for ln in f.readlines() if len(ln.strip()) > 1
90
+ ]
91
+
92
+ with hypothesis_file.open("r") as f:
93
+ hypothesis_sentences = [
94
+ ln.strip() for ln in f.readlines() if len(ln.strip()) > 1
95
+ ]
96
+
97
+ if not global_alignment and len(reference_sentences) != len(hypothesis_sentences):
98
+ raise ValueError(
99
+ f"Number of sentences does not match. "
100
+ f"{reference_file} contains {len(reference_sentences)} lines."
101
+ f"{hypothesis_file} contains {len(hypothesis_sentences)} lines."
102
+ )
103
+
104
+ if global_alignment and compute_cer:
105
+ raise ValueError("--global and --cer are mutually exclusive.")
106
+
107
+ if compute_cer:
108
+ out = jiwer.process_characters(
109
+ reference_sentences,
110
+ hypothesis_sentences,
111
+ )
112
+ else:
113
+ if global_alignment:
114
+ out = jiwer.process_words(
115
+ reference_sentences,
116
+ hypothesis_sentences,
117
+ reference_transform=jiwer.wer_contiguous,
118
+ hypothesis_transform=jiwer.wer_contiguous,
119
+ )
120
+ else:
121
+ out = jiwer.process_words(reference_sentences, hypothesis_sentences)
122
+
123
+ if show_alignment:
124
+ print(jiwer.visualize_alignment(out, show_measures=True))
125
+ else:
126
+ if compute_cer:
127
+ print(out.cer)
128
+ else:
129
+ print(out.wer)
130
+
131
+
132
+ if __name__ == "__main__":
133
+ cli()
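As a hedged sketch, the programmatic equivalent of the CLI's `-g` flag, mirroring the global-alignment branch in `cli.py` above (assuming `jiwer.wer_contiguous` is exported as used there; the sentences are toy data):

```python
# Sketch of the global-alignment path behind the CLI's -g flag: both sides are
# reduced to one contiguous sequence before the WER is computed, so the number
# of reference and hypothesis lines does not have to match.
import jiwer

out = jiwer.process_words(
    ["i like python very much"],
    ["i like", "python very much"],
    reference_transform=jiwer.wer_contiguous,
    hypothesis_transform=jiwer.wer_contiguous,
)
print(out.wer)
```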
jiwer/jiwer/measures.py ADDED
@@ -0,0 +1,488 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+
19
+ """
20
+ Convenience methods for calculating a number of similarity error
21
+ measures between a reference and hypothesis sentence.
22
+ These measures are
23
+ commonly used to measure the performance of an automatic speech recognition
24
+ (ASR) system.
25
+
26
+ The following measures are implemented:
27
+
28
+ - Word Error Rate (WER), which is where this library got its name from. This
29
+ has long been (and arguably still is) the de facto standard for computing
30
+ ASR performance.
31
+ - Match Error Rate (MER)
32
+ - Word Information Lost (WIL)
33
+ - Word Information Preserved (WIP)
34
+ - Character Error Rate (CER)
35
+
36
+ Note that these functions merely call
37
+ [jiwer.process_words][process.process_words] and
38
+ [jiwer.process_characters][process.process_characters].
39
+ It is more efficient to call `process_words` or `process_characters` and access the
40
+ results from the
41
+ [jiwer.WordOutput][process.WordOutput] and
42
+ [jiwer.CharacterOutput][process.CharacterOutput]
43
+ classes.
44
+ """
45
+ import warnings
46
+
47
+ from typing import List, Union, Dict, Any
48
+
49
+ from jiwer import transforms as tr
50
+ from jiwer.transformations import wer_default, cer_default
51
+ from jiwer.process import process_words, process_words_embdiff, process_characters
52
+
53
+ __all__ = [
54
+ "wer",
55
+ "wer_embdiff",
56
+ "mer",
57
+ "wil",
58
+ "wip",
59
+ "cer",
60
+ "compute_measures",
61
+ ]
62
+
63
+ ########################################################################################
64
+ # Implementation of the WER method and co, exposed publicly
65
+
66
+
67
+ def wer(
68
+ reference: Union[str, List[str]] = None,
69
+ hypothesis: Union[str, List[str]] = None,
70
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
71
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
72
+ truth: Union[str, List[str]] = None,
73
+ truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
74
+ ) -> float:
75
+ """
76
+ Calculate the word error rate (WER) between one or more reference and
77
+ hypothesis sentences.
78
+
79
+ Args:
80
+ reference: The reference sentence(s)
81
+ hypothesis: The hypothesis sentence(s)
82
+ reference_transform: The transformation(s) to apply to the reference string(s)
83
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
84
+ truth: Deprecated, renamed to `reference`
85
+ truth_transform: Deprecated, renamed to `reference_transform`
86
+
87
+ Deprecated:
88
+ Arguments `truth` and `truth_transform` have been renamed to respectively
89
+ `reference` and `reference_transform`. Therefore, the keyword arguments
90
+ `truth` and `truth_transform` will be removed in the next release.
91
+ At the same time, `reference` and `reference_transform` will lose their
92
+ default value.
93
+
94
+ Returns:
95
+ (float): The word error rate of the given reference and
96
+ hypothesis sentence(s).
97
+ """
98
+ (
99
+ reference,
100
+ hypothesis,
101
+ reference_transform,
102
+ hypothesis_transform,
103
+ ) = _deprecate_truth(
104
+ reference=reference,
105
+ hypothesis=hypothesis,
106
+ truth=truth,
107
+ reference_transform=reference_transform,
108
+ truth_transform=truth_transform,
109
+ hypothesis_transform=hypothesis_transform,
110
+ )
111
+
112
+ output = process_words(
113
+ reference, hypothesis, reference_transform, hypothesis_transform
114
+ )
115
+ return output.wer
116
+
117
+ def wer_embdiff(
118
+ reference: Union[str, List[str]] = None,
119
+ hypothesis: Union[str, List[str]] = None,
120
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
121
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
122
+ truth: Union[str, List[str]] = None,
123
+ truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
124
+ ) -> float:
125
+ """
126
+ Calculate the word error rate (WER) between one or more reference and
127
+ hypothesis sentences.
128
+
129
+ Args:
130
+ reference: The reference sentence(s)
131
+ hypothesis: The hypothesis sentence(s)
132
+ reference_transform: The transformation(s) to apply to the reference string(s)
133
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
134
+ truth: Deprecated, renamed to `reference`
135
+ truth_transform: Deprecated, renamed to `reference_transform`
136
+
137
+ Deprecated:
138
+ Arguments `truth` and `truth_transform` have been renamed to respectively
139
+ `reference` and `reference_transform`. Therefore, the keyword arguments
140
+ `truth` and `truth_transform` will be removed in the next release.
141
+ At the same time, `reference` and `reference_transform` will lose their
142
+ default value.
143
+
144
+ Returns:
145
+ (tuple): The processed word-level output and the edit operations of the
146
+ given reference and hypothesis sentence(s).
147
+ """
148
+ (
149
+ reference,
150
+ hypothesis,
151
+ reference_transform,
152
+ hypothesis_transform,
153
+ ) = _deprecate_truth(
154
+ reference=reference,
155
+ hypothesis=hypothesis,
156
+ truth=truth,
157
+ reference_transform=reference_transform,
158
+ truth_transform=truth_transform,
159
+ hypothesis_transform=hypothesis_transform,
160
+ )
161
+
162
+ output, edit_ops = process_words_embdiff(
163
+ reference, hypothesis, reference_transform, hypothesis_transform
164
+ )
165
+ return output, edit_ops
166
+
167
+
168
+ def mer(
169
+ reference: Union[str, List[str]] = None,
170
+ hypothesis: Union[str, List[str]] = None,
171
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
172
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
173
+ truth: Union[str, List[str]] = None,
174
+ truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
175
+ ) -> float:
176
+ """
177
+ Calculate the match error rate (MER) between one or more reference and
178
+ hypothesis sentences.
179
+
180
+ Args:
181
+ reference: The reference sentence(s)
182
+ hypothesis: The hypothesis sentence(s)
183
+ reference_transform: The transformation(s) to apply to the reference string(s)
184
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
185
+ truth: Deprecated, renamed to `reference`
186
+ truth_transform: Deprecated, renamed to `reference_transform`
187
+
188
+ Deprecated:
189
+ Arguments `truth` and `truth_transform` have been renamed to respectively
190
+ `reference` and `reference_transform`. Therefore, the keyword arguments
191
+ `truth` and `truth_transform` will be removed in the next release.
192
+ At the same time, `reference` and `reference_transform` will lose their
193
+ default value.
194
+
195
+ Returns:
196
+ (float): The match error rate of the given reference and
197
+ hypothesis sentence(s).
198
+ """
199
+ (
200
+ reference,
201
+ hypothesis,
202
+ reference_transform,
203
+ hypothesis_transform,
204
+ ) = _deprecate_truth(
205
+ reference=reference,
206
+ hypothesis=hypothesis,
207
+ truth=truth,
208
+ reference_transform=reference_transform,
209
+ truth_transform=truth_transform,
210
+ hypothesis_transform=hypothesis_transform,
211
+ )
212
+
213
+ output = process_words(
214
+ reference, hypothesis, reference_transform, hypothesis_transform
215
+ )
216
+
217
+ return output.mer
218
+
219
+
220
+ def wip(
221
+ reference: Union[str, List[str]] = None,
222
+ hypothesis: Union[str, List[str]] = None,
223
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
224
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
225
+ truth: Union[str, List[str]] = None,
226
+ truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
227
+ ) -> float:
228
+ """
229
+ Calculate the word information preserved (WIP) between one or more reference and
230
+ hypothesis sentences.
231
+
232
+ Args:
233
+ reference: The reference sentence(s)
234
+ hypothesis: The hypothesis sentence(s)
235
+ reference_transform: The transformation(s) to apply to the reference string(s)
236
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
237
+ truth: Deprecated, renamed to `reference`
238
+ truth_transform: Deprecated, renamed to `reference_transform`
239
+
240
+ Deprecated:
241
+ Arguments `truth` and `truth_transform` have been renamed to respectively
242
+ `reference` and `reference_transform`. Therefore, the keyword arguments
243
+ `truth` and `truth_transform` will be removed in the next release.
244
+ At the same time, `reference` and `reference_transform` will lose their
245
+ default value.
246
+
247
+ Returns:
248
+ (float): The word information preserved of the given reference and
249
+ hypothesis sentence(s).
250
+ """
251
+ (
252
+ reference,
253
+ hypothesis,
254
+ reference_transform,
255
+ hypothesis_transform,
256
+ ) = _deprecate_truth(
257
+ reference=reference,
258
+ hypothesis=hypothesis,
259
+ truth=truth,
260
+ reference_transform=reference_transform,
261
+ truth_transform=truth_transform,
262
+ hypothesis_transform=hypothesis_transform,
263
+ )
264
+
265
+ output = process_words(
266
+ reference, hypothesis, reference_transform, hypothesis_transform
267
+ )
268
+
269
+ return output.wip
270
+
271
+
272
+ def wil(
273
+ reference: Union[str, List[str]] = None,
274
+ hypothesis: Union[str, List[str]] = None,
275
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
276
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
277
+ truth: Union[str, List[str]] = None,
278
+ truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
279
+ ) -> float:
280
+ """
281
+ Calculate the word information lost (WIL) between one or more reference and
282
+ hypothesis sentences.
283
+
284
+ Args:
285
+ reference: The reference sentence(s)
286
+ hypothesis: The hypothesis sentence(s)
287
+ reference_transform: The transformation(s) to apply to the reference string(s)
288
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
289
+ truth: Deprecated, renamed to `reference`
290
+ truth_transform: Deprecated, renamed to `reference_transform`
291
+
292
+ Deprecated:
293
+ Arguments `truth` and `truth_transform` have been renamed to respectively
294
+ `reference` and `reference_transform`. Therefore, the keyword arguments
295
+ `truth` and `truth_transform` will be removed in the next release.
296
+ At the same time, `reference` and `reference_transform` will lose their
297
+ default value.
298
+
299
+ Returns:
300
+ (float): The word information lost of the given reference and
301
+ hypothesis sentence(s).
302
+ """
303
+ (
304
+ reference,
305
+ hypothesis,
306
+ reference_transform,
307
+ hypothesis_transform,
308
+ ) = _deprecate_truth(
309
+ reference=reference,
310
+ hypothesis=hypothesis,
311
+ truth=truth,
312
+ reference_transform=reference_transform,
313
+ truth_transform=truth_transform,
314
+ hypothesis_transform=hypothesis_transform,
315
+ )
316
+
317
+ output = process_words(
318
+ reference, hypothesis, reference_transform, hypothesis_transform
319
+ )
320
+
321
+ return output.wil
322
+
323
+
324
+ ########################################################################################
325
+ # deprecated method 'compute_measures'
326
+
327
+
328
+ def compute_measures(
329
+ truth: Union[str, List[str]],
330
+ hypothesis: Union[str, List[str]],
331
+ truth_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
332
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
333
+ ) -> Dict[str, Any]:
334
+ """
335
+ Efficiently computes all measures using only one function call.
336
+
337
+ Deprecated:
338
+ Deprecated method. Superseded by [jiwer.process_words][process.process_words].
339
+ This method will be removed on next release.
340
+
341
+ Args:
342
+ truth: The reference sentence(s)
343
+ hypothesis: The hypothesis sentence(s)
344
+ truth_transform: The transformation(s) to apply to the reference string(s)
345
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
346
+
347
+ Returns:
348
+ (dict): A dictionary containing key-value pairs for all measures.
349
+
350
+ """
351
+ warnings.warn(
352
+ DeprecationWarning(
353
+ "jiwer.compute_measures() is deprecated. Please use jiwer.process_words()."
354
+ )
355
+ )
356
+
357
+ output = process_words(
358
+ reference=truth,
359
+ hypothesis=hypothesis,
360
+ reference_transform=truth_transform,
361
+ hypothesis_transform=hypothesis_transform,
362
+ )
363
+
364
+ return {
365
+ "wer": output.wer,
366
+ "mer": output.mer,
367
+ "wil": output.wil,
368
+ "wip": output.wip,
369
+ "hits": output.hits,
370
+ "substitutions": output.substitutions,
371
+ "deletions": output.deletions,
372
+ "insertions": output.insertions,
373
+ "ops": output.alignments,
374
+ "truth": output.references,
375
+ "hypothesis": output.hypotheses,
376
+ }
377
+
378
+
379
+ ########################################################################################
380
+ # Implementation of character-error-rate, exposed publicly
381
+
382
+
383
+ def cer(
384
+ reference: Union[str, List[str]] = None,
385
+ hypothesis: Union[str, List[str]] = None,
386
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
387
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
388
+ return_dict: bool = False,
389
+ truth: Union[str, List[str]] = None,
390
+ truth_transform: Union[tr.Compose, tr.AbstractTransform] = None,
391
+ ) -> Union[float, Dict[str, Any]]:
392
+ """
393
+ Calculate the character error rate (CER) between one or more reference and
394
+ hypothesis sentences.
395
+
396
+ Args:
397
+ reference: The reference sentence(s)
398
+ hypothesis: The hypothesis sentence(s)
399
+ reference_transform: The transformation(s) to apply to the reference string(s)
400
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
401
+ return_dict: Deprecated option to return more results in a dict instead of
402
+ returning only the cer as a single float value
403
+ truth: Deprecated, renamed to `reference`
404
+ truth_transform: Deprecated, renamed to `reference_transform`
405
+
406
+ Deprecated:
407
+ Argument `return_dict` will be deprecated. Please use
408
+ [jiwer.process_characters][process.process_characters] instead.
409
+
410
+ Arguments `truth` and `truth_transform` have been renamed to respectively
411
+ `reference` and `reference_transform`. Therefore, the keyword arguments
412
+ `truth` and `truth_transform` will be removed in the next release.
413
+ At the same time, `reference` and `reference_transform` will lose their
414
+ default value.
415
+
416
+ Returns:
417
+ (float): The character error rate of the given reference and hypothesis
418
+ sentence(s).
419
+ """
420
+ (
421
+ reference,
422
+ hypothesis,
423
+ reference_transform,
424
+ hypothesis_transform,
425
+ ) = _deprecate_truth(
426
+ reference=reference,
427
+ hypothesis=hypothesis,
428
+ truth=truth,
429
+ reference_transform=reference_transform,
430
+ truth_transform=truth_transform,
431
+ hypothesis_transform=hypothesis_transform,
432
+ )
433
+
434
+ output = process_characters(
435
+ reference, hypothesis, reference_transform, hypothesis_transform
436
+ )
437
+
438
+ if return_dict:
439
+ warnings.warn(
440
+ DeprecationWarning(
441
+ "`return_dict` is deprecated, "
442
+ "please use jiwer.process_characters() instead."
443
+ )
444
+ )
445
+ return {
446
+ "cer": output.cer,
447
+ "hits": output.hits,
448
+ "substitutions": output.substitutions,
449
+ "deletions": output.deletions,
450
+ "insertions": output.insertions,
451
+ }
452
+ else:
453
+ return output.cer
454
+
455
+
456
+ def _deprecate_truth(
457
+ reference: Union[str, List[str]],
458
+ hypothesis: Union[str, List[str]],
459
+ truth: Union[str, List[str]],
460
+ reference_transform: Union[tr.Compose, tr.AbstractTransform],
461
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform],
462
+ truth_transform: Union[tr.Compose, tr.AbstractTransform],
463
+ ):
464
+ if truth is not None:
465
+ warnings.warn(
466
+ DeprecationWarning(
467
+ "keyword argument `truth` is deprecated, please use `reference`."
468
+ )
469
+ )
470
+ if reference is not None:
471
+ raise ValueError("cannot give `reference` and `truth`")
472
+ reference = truth
473
+ if truth_transform is not None:
474
+ warnings.warn(
475
+ DeprecationWarning(
476
+ "keyword argument `truth_transform` is deprecated, "
477
+ "please use `reference_transform`."
478
+ )
479
+ )
480
+ reference_transform = truth_transform
481
+
482
+ if reference is None or hypothesis is None:
483
+ raise ValueError(
484
+ "detected default values for reference or hypothesis arguments, "
485
+ "please provide actual string or list of strings"
486
+ )
487
+
488
+ return reference, hypothesis, reference_transform, hypothesis_transform
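Following the module docstring above, a small sketch contrasting repeated convenience calls with a single `process_words` call that yields every measure at once (toy sentences):

```python
# Sketch: the convenience functions re-run the alignment per call, while a
# single process_words call exposes all measures and edit counts together.
import jiwer

ref = ["hello world", "this is a test"]
hyp = ["hello duck", "this is test"]

# three separate calls -> the word-level alignment is computed three times
print(jiwer.wer(ref, hyp), jiwer.mer(ref, hyp), jiwer.wil(ref, hyp))

# one call -> every measure and the raw counts from the same alignment
out = jiwer.process_words(ref, hyp)
print(out.wer, out.mer, out.wil, out.wip)
print(out.hits, out.substitutions, out.deletions, out.insertions)
```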
jiwer/jiwer/process.py ADDED
@@ -0,0 +1,525 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+
19
+ """
20
+ The core algorithm(s) for processing one or more reference and hypothesis sentences
21
+ so that measures can be computed and an alignment can be visualized.
22
+ """
23
+
24
+ from dataclasses import dataclass
25
+
26
+ from typing import Any, List, Union
27
+ from itertools import chain
28
+
29
+ import rapidfuzz
30
+
31
+ from rapidfuzz.distance import Opcodes
32
+
33
+ from jiwer import transforms as tr
34
+ from jiwer.transformations import wer_default, cer_default
35
+
36
+
37
+ __all__ = [
38
+ "AlignmentChunk",
39
+ "WordOutput",
40
+ "CharacterOutput",
41
+ "process_words",
42
+ "process_words_embdiff",
43
+ "process_characters",
44
+ ]
45
+
46
+
47
+ @dataclass
48
+ class AlignmentChunk:
49
+ """
50
+ Define an alignment between a subsequence of the reference and a subsequence of the hypothesis.
51
+
52
+ Attributes:
53
+ type: one of `equal`, `substitute`, `insert`, or `delete`
54
+ ref_start_idx: the start index of the reference subsequence
55
+ ref_end_idx: the end index of the reference subsequence
56
+ hyp_start_idx: the start index of the hypothesis subsequence
57
+ hyp_end_idx: the end index of the hypothesis subsequence
58
+ """
59
+
60
+ type: str
61
+
62
+ ref_start_idx: int
63
+ ref_end_idx: int
64
+
65
+ hyp_start_idx: int
66
+ hyp_end_idx: int
67
+
68
+ def __post_init__(self):
69
+ if self.type not in ["replace", "insert", "delete", "equal", "substitute"]:
70
+ raise ValueError(f"unknown alignment type '{self.type}'")
71
+
72
+ # rapidfuzz uses replace instead of substitute... For consistency, we change it
73
+ if self.type == "replace":
74
+ self.type = "substitute"
75
+
76
+ if self.ref_start_idx > self.ref_end_idx:
77
+ raise ValueError(
78
+ f"ref_start_idx={self.ref_start_idx} "
79
+ f"is larger "
80
+ f"than ref_end_idx={self.ref_end_idx}"
81
+ )
82
+ if self.hyp_start_idx > self.hyp_end_idx:
83
+ raise ValueError(
84
+ f"hyp_start_idx={self.hyp_start_idx} "
85
+ f"is larger "
86
+ f"than hyp_end_idx={self.hyp_end_idx}"
87
+ )
88
+
89
+
90
+ @dataclass
91
+ class WordOutput:
92
+ """
93
+ The output of calculating the word-level levenshtein distance between one or more
94
+ reference and hypothesis sentence(s).
95
+
96
+ Attributes:
97
+ references: The reference sentences
98
+ hypotheses: The hypothesis sentences
99
+ alignments: The alignment between reference and hypothesis sentences
100
+ wer: The word error rate
101
+ mer: The match error rate
102
+ wil: The word information lost measure
103
+ wip: The word information preserved measure
104
+ hits: The number of correct words between reference and hypothesis sentences
105
+ substitutions: The number of substitutions required to transform hypothesis
106
+ sentences to reference sentences
107
+ insertions: The number of insertions required to transform hypothesis
108
+ sentences to reference sentences
109
+ deletions: The number of deletions required to transform hypothesis
110
+ sentences to reference sentences
111
+
112
+ """
113
+
114
+ # processed input data
115
+ references: List[List[str]]
116
+ hypotheses: List[List[str]]
117
+
118
+ # alignment
119
+ alignments: List[List[AlignmentChunk]]
120
+
121
+ # measures
122
+ wer: float
123
+ mer: float
124
+ wil: float
125
+ wip: float
126
+
127
+ # stats
128
+ hits: int
129
+ substitutions: int
130
+ insertions: int
131
+ deletions: int
132
+
133
+
134
+ def process_words(
135
+ reference: Union[str, List[str]],
136
+ hypothesis: Union[str, List[str]],
137
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
138
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
139
+ ) -> WordOutput:
140
+ """
141
+ Compute the word-level levenshtein distance and alignment between one or more
142
+ reference and hypothesis sentences. Based on the result, multiple measures
143
+ can be computed, such as the word error rate.
144
+
145
+ Args:
146
+ reference: The reference sentence(s)
147
+ hypothesis: The hypothesis sentence(s)
148
+ reference_transform: The transformation(s) to apply to the reference string(s)
149
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
150
+
151
+ Returns:
152
+ (WordOutput): The processed reference and hypothesis sentences
153
+ """
154
+ # validate input type
155
+ if isinstance(reference, str):
156
+ reference = [reference]
157
+ if isinstance(hypothesis, str):
158
+ hypothesis = [hypothesis]
159
+ if any(len(t) == 0 for t in reference):
160
+ raise ValueError("one or more references are empty strings")
161
+
162
+ # pre-process reference and hypothesis by applying transforms
163
+ ref_transformed = _apply_transform(
164
+ reference, reference_transform, is_reference=True
165
+ )
166
+ hyp_transformed = _apply_transform(
167
+ hypothesis, hypothesis_transform, is_reference=False
168
+ )
169
+
170
+ if len(ref_transformed) != len(hyp_transformed):
171
+ raise ValueError(
172
+ "After applying the transforms on the reference and hypothesis sentences, "
173
+ f"their lengths must match. "
174
+ f"Instead got {len(ref_transformed)} reference and "
175
+ f"{len(hyp_transformed)} hypothesis sentences."
176
+ )
177
+
178
+ # Change each word into a unique character in order to compute
179
+ # word-level levenshtein distance
180
+ ref_as_chars, hyp_as_chars = _word2char(ref_transformed, hyp_transformed)
181
+
182
+ # keep track of total hits, substitutions, deletions and insertions
183
+ # across all input sentences
184
+ num_hits, num_substitutions, num_deletions, num_insertions = 0, 0, 0, 0
185
+
186
+ # also keep track of the total number of words in the reference and hypothesis
187
+ num_rf_words, num_hp_words = 0, 0
188
+
189
+ # and finally, keep track of the alignment between each reference and hypothesis
190
+ alignments = []
191
+
192
+ for reference_sentence, hypothesis_sentence in zip(ref_as_chars, hyp_as_chars):
193
+ # Get the required edit operations to transform reference into hypothesis
194
+ edit_ops = rapidfuzz.distance.Levenshtein.editops(
195
+ reference_sentence, hypothesis_sentence
196
+ )
197
+
198
+ # count the number of edits of each type
199
+ substitutions = sum(1 if op.tag == "replace" else 0 for op in edit_ops)
200
+ deletions = sum(1 if op.tag == "delete" else 0 for op in edit_ops)
201
+ insertions = sum(1 if op.tag == "insert" else 0 for op in edit_ops)
202
+ hits = len(reference_sentence) - (substitutions + deletions)
203
+
204
+ # update state
205
+ num_hits += hits
206
+ num_substitutions += substitutions
207
+ num_deletions += deletions
208
+ num_insertions += insertions
209
+ num_rf_words += len(reference_sentence)
210
+ num_hp_words += len(hypothesis_sentence)
211
+ alignments.append(
212
+ [
213
+ AlignmentChunk(
214
+ type=op.tag,
215
+ ref_start_idx=op.src_start,
216
+ ref_end_idx=op.src_end,
217
+ hyp_start_idx=op.dest_start,
218
+ hyp_end_idx=op.dest_end,
219
+ )
220
+ for op in Opcodes.from_editops(edit_ops)
221
+ ]
222
+ )
223
+
224
+ # Compute all measures
225
+ S, D, I, H = num_substitutions, num_deletions, num_insertions, num_hits
226
+
227
+ wer = float(S + D + I) / float(H + S + D)
228
+ mer = float(S + D + I) / float(H + S + D + I)
229
+ wip = (
230
+ (float(H) / num_rf_words) * (float(H) / num_hp_words)
231
+ if num_hp_words >= 1
232
+ else 0
233
+ )
234
+ wil = 1 - wip
235
+
236
+ # return all output
237
+ return WordOutput(
238
+ references=ref_transformed,
239
+ hypotheses=hyp_transformed,
240
+ alignments=alignments,
241
+ wer=wer,
242
+ mer=mer,
243
+ wil=wil,
244
+ wip=wip,
245
+ hits=num_hits,
246
+ substitutions=num_substitutions,
247
+ insertions=num_insertions,
248
+ deletions=num_deletions,
249
+ )
250
+
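# A minimal usage sketch of `process_words` (assuming the package is importable as
# `jiwer`), showing how the hit/edit counts above turn into the measures:
#
#     import jiwer
#     out = jiwer.process_words("this is a test", "this was a test of skill")
#     # hits H=3, substitutions S=1 ("is" -> "was"), deletions D=0, insertions I=2
#     # wer = (S + D + I) / (H + S + D) = 3 / 4 = 0.75
#     print(out.wer, out.mer, out.wil, out.wip)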
251
+ def process_words_embdiff(
252
+ reference: Union[str, List[str]],
253
+ hypothesis: Union[str, List[str]],
254
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
255
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = wer_default,
256
+ ) -> WordOutput:
257
+ """
258
+ Compute the word-level levenshtein distance and alignment between one or more
259
+ reference and hypothesis sentences. Based on the result, multiple measures
260
+ can be computed, such as the word error rate.
261
+
262
+ Args:
263
+ reference: The reference sentence(s)
264
+ hypothesis: The hypothesis sentence(s)
265
+ reference_transform: The transformation(s) to apply to the reference string(s)
266
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
267
+
268
+ Returns:
269
+ (WordOutput): The processed reference and hypothesis sentences
270
+ """
271
+ # validate input type
272
+ if isinstance(reference, str):
273
+ reference = [reference]
274
+ if isinstance(hypothesis, str):
275
+ hypothesis = [hypothesis]
276
+ if any(len(t) == 0 for t in reference):
277
+ raise ValueError("one or more references are empty strings")
278
+
279
+ # pre-process reference and hypothesis by applying transforms
280
+ ref_transformed = _apply_transform(
281
+ reference, reference_transform, is_reference=True
282
+ )
283
+ hyp_transformed = _apply_transform(
284
+ hypothesis, hypothesis_transform, is_reference=False
285
+ )
286
+
287
+ if len(ref_transformed) != len(hyp_transformed):
288
+ raise ValueError(
289
+ "After applying the transforms on the reference and hypothesis sentences, "
290
+ f"their lengths must match. "
291
+ f"Instead got {len(ref_transformed)} reference and "
292
+ f"{len(hyp_transformed)} hypothesis sentences."
293
+ )
294
+
295
+ # Change each word into a unique character in order to compute
296
+ # word-level levenshtein distance
297
+ ref_as_chars, hyp_as_chars = _word2char(ref_transformed, hyp_transformed)
298
+
299
+ # keep track of total hits, substitutions, deletions and insertions
300
+ # across all input sentences
301
+ num_hits, num_substitutions, num_deletions, num_insertions = 0, 0, 0, 0
302
+
303
+ # also keep track of the total number of words in the reference and hypothesis
304
+ num_rf_words, num_hp_words = 0, 0
305
+
306
+ # and finally, keep track of the alignment between each reference and hypothesis
307
+ alignments = []
308
+
309
+ for reference_sentence, hypothesis_sentence in zip(ref_as_chars, hyp_as_chars):
310
+ # Get the required edit operations to transform reference into hypothesis
311
+ edit_ops = rapidfuzz.distance.Levenshtein.editops(
312
+ reference_sentence, hypothesis_sentence
313
+ )
314
+
315
+ # count the number of edits of each type
316
+ substitutions = sum(1 if op.tag == "replace" else 0 for op in edit_ops)
317
+ deletions = sum(1 if op.tag == "delete" else 0 for op in edit_ops)
318
+ insertions = sum(1 if op.tag == "insert" else 0 for op in edit_ops)
319
+ hits = len(reference_sentence) - (substitutions + deletions)
320
+
321
+ # update state
322
+ num_hits += hits
323
+ num_substitutions += substitutions
324
+ num_deletions += deletions
325
+ num_insertions += insertions
326
+ num_rf_words += len(reference_sentence)
327
+ num_hp_words += len(hypothesis_sentence)
328
+ alignments.append(
329
+ [
330
+ AlignmentChunk(
331
+ type=op.tag,
332
+ ref_start_idx=op.src_start,
333
+ ref_end_idx=op.src_end,
334
+ hyp_start_idx=op.dest_start,
335
+ hyp_end_idx=op.dest_end,
336
+ )
337
+ for op in Opcodes.from_editops(edit_ops)
338
+ ]
339
+ )
340
+
341
+ # Compute all measures
342
+ S, D, I, H = num_substitutions, num_deletions, num_insertions, num_hits
343
+
344
+ wer = float(S + D + I) / float(H + S + D)
345
+ mer = float(S + D + I) / float(H + S + D + I)
346
+ wip = (
347
+ (float(H) / num_rf_words) * (float(H) / num_hp_words)
348
+ if num_hp_words >= 1
349
+ else 0
350
+ )
351
+ wil = 1 - wip
352
+
353
+ # return all output
354
+ return WordOutput(
355
+ references=ref_transformed,
356
+ hypotheses=hyp_transformed,
357
+ alignments=alignments,
358
+ wer=wer,
359
+ mer=mer,
360
+ wil=wil,
361
+ wip=wip,
362
+ hits=num_hits,
363
+ substitutions=num_substitutions,
364
+ insertions=num_insertions,
365
+ deletions=num_deletions,
366
+ ), edit_ops
367
+
368
+
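# A minimal usage sketch of the variant above (assuming this module is importable as
# `jiwer.measures`). Note that the extra `edit_ops` value is the rapidfuzz editops of
# the last reference/hypothesis pair processed in the loop:
#
#     from jiwer.measures import process_words_embdiff
#     word_output, edit_ops = process_words_embdiff("this is a test", "this was a test")
#     print(word_output.wer, [op.tag for op in edit_ops])
#     # prints: 0.25 ['replace']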
369
+ ########################################################################################
370
+ # Implementation of character error rate
371
+
372
+
373
+ @dataclass
374
+ class CharacterOutput:
375
+ """
376
+ The output of calculating the character-level levenshtein distance between one or
377
+ more reference and hypothesis sentence(s).
378
+
379
+ Attributes:
380
+ references: The reference sentences
381
+ hypotheses: The hypothesis sentences
382
+ alignments: The alignment between reference and hypothesis sentences
383
+ cer: The character error rate
384
+ hits: The number of correct characters between reference and hypothesis
385
+ sentences
386
+ substitutions: The number of substitutions required to transform hypothesis
387
+ sentences to reference sentences
388
+ insertions: The number of insertions required to transform hypothesis
389
+ sentences to reference sentences
390
+ deletions: The number of deletions required to transform hypothesis
391
+ sentences to reference sentences
392
+ """
393
+
394
+ # processed input data
395
+ references: List[List[str]]
396
+ hypotheses: List[List[str]]
397
+
398
+ # alignment
399
+ alignments: List[List[AlignmentChunk]]
400
+
401
+ # measures
402
+ cer: float
403
+
404
+ # stats
405
+ hits: int
406
+ substitutions: int
407
+ insertions: int
408
+ deletions: int
409
+
410
+
411
+ def process_characters(
412
+ reference: Union[str, List[str]],
413
+ hypothesis: Union[str, List[str]],
414
+ reference_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
415
+ hypothesis_transform: Union[tr.Compose, tr.AbstractTransform] = cer_default,
416
+ ) -> CharacterOutput:
417
+ """
418
+ Compute the character-level levenshtein distance and alignment between one or more
419
+ reference and hypothesis sentences. Based on the result, the character error rate
420
+ can be computed.
421
+
422
+ Note that by default this method includes space (` `) as a
423
+ character over which the error rate is computed. If this is not desired, the
424
+ reference and hypothesis transform need to be modified.
425
+
426
+ Args:
427
+ reference: The reference sentence(s)
428
+ hypothesis: The hypothesis sentence(s)
429
+ reference_transform: The transformation(s) to apply to the reference string(s)
430
+ hypothesis_transform: The transformation(s) to apply to the hypothesis string(s)
431
+
432
+ Returns:
433
+ (CharacterOutput): The processed reference and hypothesis sentences.
434
+
435
+ """
436
+ # make sure the transforms end with tr.ReduceToListOfListOfChars(),
437
+
438
+ # it's the same as word processing, just every word is of length 1
439
+ result = process_words(
440
+ reference, hypothesis, reference_transform, hypothesis_transform
441
+ )
442
+
443
+ return CharacterOutput(
444
+ references=result.references,
445
+ hypotheses=result.hypotheses,
446
+ alignments=result.alignments,
447
+ cer=result.wer,
448
+ hits=result.hits,
449
+ substitutions=result.substitutions,
450
+ insertions=result.insertions,
451
+ deletions=result.deletions,
452
+ )
453
+
454
+
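# A minimal usage sketch (assuming the package is importable as `jiwer`). Because
# `process_characters` reuses `process_words` with character-level transforms, `cer`
# is the word-level formula applied to characters:
#
#     import jiwer
#     out = jiwer.process_characters("this is a test", "this iz a test")
#     # 1 substituted character out of 14 reference characters -> cer = 1 / 14
#     print(out.cer, out.hits, out.substitutions)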
455
+ ################################################################################
456
+ # Implementation of helper methods
457
+
458
+
459
+ def _apply_transform(
460
+ sentence: Union[str, List[str]],
461
+ transform: Union[tr.Compose, tr.AbstractTransform],
462
+ is_reference: bool,
463
+ ):
464
+ # Apply transforms. The transforms should collapse input to a
465
+ # list with lists of words
466
+ transformed_sentence = transform(sentence)
467
+
468
+ # Validate the output is a list containing lists of strings
469
+ if is_reference:
470
+ if not _is_list_of_list_of_strings(
471
+ transformed_sentence, require_non_empty_lists=True
472
+ ):
473
+ raise ValueError(
474
+ "After applying the transformation, each reference should be a "
475
+ "non-empty list of strings, with each string being a single word."
476
+ )
477
+ else:
478
+ if not _is_list_of_list_of_strings(
479
+ transformed_sentence, require_non_empty_lists=False
480
+ ):
481
+ raise ValueError(
482
+ "After applying the transformation, each hypothesis should be a "
483
+ "list of strings, with each string being a single word."
484
+ )
485
+
486
+ return transformed_sentence
487
+
488
+
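# For reference, the shape `_apply_transform` enforces: after the transform, each input
# must be a list of sentences, with each sentence a list of word strings, e.g.
#
#     [["this", "is", "a", "test"], ["second", "sentence"]]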
489
+ def _is_list_of_list_of_strings(x: Any, require_non_empty_lists: bool):
490
+ if not isinstance(x, list):
491
+ return False
492
+
493
+ for e in x:
494
+ if not isinstance(e, list):
495
+ return False
496
+
497
+ if require_non_empty_lists and len(e) == 0:
498
+ return False
499
+
500
+ if not all([isinstance(s, str) for s in e]):
501
+ return False
502
+
503
+ return True
504
+
505
+
506
+ def _word2char(reference: List[List[str]], hypothesis: List[List[str]]):
507
+ # tokenize each word into an integer
508
+ vocabulary = set(chain(*reference, *hypothesis))
509
+
510
+ if "" in vocabulary:
511
+ raise ValueError(
512
+ "Empty strings cannot be a word. "
513
+ "Please ensure that the given transform removes empty strings."
514
+ )
515
+
516
+ word2char = dict(zip(vocabulary, range(len(vocabulary))))
517
+
518
+ reference_chars = [
519
+ "".join([chr(word2char[w]) for w in sentence]) for sentence in reference
520
+ ]
521
+ hypothesis_chars = [
522
+ "".join([chr(word2char[w]) for w in sentence]) for sentence in hypothesis
523
+ ]
524
+
525
+ return reference_chars, hypothesis_chars
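# Sketch of what `_word2char` does: every distinct word is mapped to a unique code
# point so that rapidfuzz can compute a word-level edit distance on plain strings.
# The exact characters depend on set iteration order, e.g.
#
#     ref = [["short", "one", "here"]]
#     hyp = [["shorter", "one", "there"]]
#     # _word2char(ref, hyp) might return (["\x00\x01\x02"], ["\x03\x01\x04"]);
#     # only words shared by reference and hypothesis map to the same character.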
jiwer/jiwer/transformations.py ADDED
@@ -0,0 +1,128 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ """
19
+ This file is intended to provide the default transformations which need
20
+ to be applied to input text in order to compute the WER (or similar measures).
21
+
22
+ It also implements some alternative transformations which might be
23
+ useful in specific use cases.
24
+ """
25
+
26
+ import jiwer.transforms as tr
27
+
28
+ __all__ = [
29
+ "wer_default",
30
+ "wer_contiguous",
31
+ "wer_standardize",
32
+ "wer_standardize_contiguous",
33
+ "cer_default",
34
+ "cer_contiguous",
35
+ ]
36
+
37
+ ########################################################################################
38
+ # implement transformations for WER (and accompanying measures)
39
+
40
+ wer_default = tr.Compose(
41
+ [
42
+ tr.RemoveMultipleSpaces(),
43
+ tr.Strip(),
44
+ tr.ReduceToListOfListOfWords(),
45
+ ]
46
+ )
47
+ """
48
+ This is the default transformation when using `process_words`. Each input string will
49
+ have its leading and trailing white space removed.
50
+ Thereafter multiple spaces between words are also removed.
51
+ Then each string is transformed into a list with lists of strings, where each string
52
+ is a single word.
53
+ """
54
+
55
+ wer_contiguous = tr.Compose(
56
+ [
57
+ tr.RemoveMultipleSpaces(),
58
+ tr.Strip(),
59
+ tr.ReduceToSingleSentence(),
60
+ tr.ReduceToListOfListOfWords(),
61
+ ]
62
+ )
63
+ """
64
+ This can be used instead of `wer_default` when the number of reference and hypothesis
65
+ sentences differ.
66
+ """
67
+
68
+ wer_standardize = tr.Compose(
69
+ [
70
+ tr.ToLowerCase(),
71
+ tr.ExpandCommonEnglishContractions(),
72
+ tr.RemoveKaldiNonWords(),
73
+ tr.RemoveWhiteSpace(replace_by_space=True),
74
+ tr.RemoveMultipleSpaces(),
75
+ tr.Strip(),
76
+ tr.ReduceToListOfListOfWords(),
77
+ ]
78
+ )
79
+ """
80
+ This transform attempts to standardize the strings by setting all characters to lower
81
+ case, expanding common contractions, and removing non-words. Then the default operations
82
+ are applied.
83
+ """
84
+
85
+ wer_standardize_contiguous = tr.Compose(
86
+ [
87
+ tr.ToLowerCase(),
88
+ tr.ExpandCommonEnglishContractions(),
89
+ tr.RemoveKaldiNonWords(),
90
+ tr.RemoveWhiteSpace(replace_by_space=True),
91
+ tr.RemoveMultipleSpaces(),
92
+ tr.Strip(),
93
+ tr.ReduceToSingleSentence(),
94
+ tr.ReduceToListOfListOfWords(),
95
+ ]
96
+ )
97
+ """
98
+ This is the same as `wer_standardize`, but this version can be used when the number of
99
+ reference and hypothesis sentences differ.
100
+ """
101
+
102
+ ########################################################################################
103
+ # implement transformations for CER
104
+
105
+
106
+ cer_default = tr.Compose(
107
+ [
108
+ tr.Strip(),
109
+ tr.ReduceToListOfListOfChars(),
110
+ ]
111
+ )
112
+ """
113
+ This is the default transformation when using `process_characters`. Each input string
114
+ will have its leading and trailing white space removed. Then each string is
115
+ transformed into a list with lists of strings, where each string is a single character.
116
+ """
117
+
118
+ cer_contiguous = tr.Compose(
119
+ [
120
+ tr.Strip(),
121
+ tr.ReduceToSingleSentence(),
122
+ tr.ReduceToListOfListOfChars(),
123
+ ]
124
+ )
125
+ """
126
+ This can be used instead of `cer_default` when the number of reference and hypothesis
127
+ sentences differ.
128
+ """
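# A minimal sketch combining these presets with the measures API (assuming the package
# is importable as `jiwer`):
#
#     import jiwer
#     out = jiwer.process_words(
#         "He's NOT here", "he is not here",
#         reference_transform=jiwer.transformations.wer_standardize,
#         hypothesis_transform=jiwer.transformations.wer_standardize,
#     )
#     print(out.wer)  # 0.0 after lowercasing and expanding the contraction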
jiwer/jiwer/transforms.py ADDED
@@ -0,0 +1,620 @@
1
+ #
2
+ # JiWER - Jitsi Word Error Rate
3
+ #
4
+ # Copyright @ 2018 - present 8x8, Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ #
18
+
19
+ """
20
+ This file implements the building blocks for transforming a collection
21
+ of input strings to the desired format in order to calculate the WER or CER.
22
+
23
+ In principle, for word error rate calculations, every string of a sentence needs to be
24
+ collapsed into a list of strings, where each string is a *single* word.
25
+ This is done with [transforms.ReduceToListOfListOfWords][].
26
+ A composition of multiple transformations must therefore *always* end with
27
+ [transforms.ReduceToListOfListOfWords][].
28
+
29
+ For the character error rate, every string of a sentence also needs to be collapsed into
30
+ a list of strings, but here each string is a *single* character.
31
+ This is done with [transforms.ReduceToListOfListOfChars][]. Similarly, a
32
+ composition of multiple transformations must therefore also always end with
33
+ [transforms.ReduceToListOfListOfChars][].
34
+ """
35
+
36
+ import sys
37
+ import functools
38
+ import re
39
+ import string
40
+ import unicodedata
41
+
42
+ from typing import Union, List, Mapping
43
+
44
+
45
+ __all__ = [
46
+ "AbstractTransform",
47
+ "Compose",
48
+ "ExpandCommonEnglishContractions",
49
+ "RemoveEmptyStrings",
50
+ "ReduceToListOfListOfWords",
51
+ "ReduceToListOfListOfChars",
52
+ "ReduceToSingleSentence",
53
+ "RemoveKaldiNonWords",
54
+ "RemoveMultipleSpaces",
55
+ "RemovePunctuation",
56
+ "RemoveSpecificWords",
57
+ "RemoveWhiteSpace",
58
+ "Strip",
59
+ "SubstituteRegexes",
60
+ "SubstituteWords",
61
+ "ToLowerCase",
62
+ "ToUpperCase",
63
+ ]
64
+
65
+
66
+ class AbstractTransform(object):
67
+ """
68
+ The base class of a Transform.
69
+ """
70
+
71
+ def __call__(self, sentences: Union[str, List[str]]):
72
+ """
73
+ Transforms one or more strings.
74
+
75
+ Args:
76
+ sentences: The strings to transform.
77
+
78
+ Returns:
79
+ (Union[str, List[str]]): The transformed strings.
80
+
81
+ """
82
+ if isinstance(sentences, str):
83
+ return self.process_string(sentences)
84
+ elif isinstance(sentences, list):
85
+ return self.process_list(sentences)
86
+ else:
87
+ raise ValueError(
88
+ "input {} was expected to be a string or list of strings".format(
89
+ sentences
90
+ )
91
+ )
92
+
93
+ def process_string(self, s: str):
94
+ raise NotImplementedError()
95
+
96
+ def process_list(self, inp: List[str]):
97
+ return [self.process_string(s) for s in inp]
98
+
99
+
100
+ class Compose(object):
101
+ """
102
+ Chain multiple transformations back-to-back to create a pipeline combining multiple
103
+ transformations.
104
+
105
+ Note that each transformation needs to end with either `ReduceToListOfListOfWords`
106
+ or `ReduceToListOfListOfChars`, depending on whether word error rate,
107
+ or character error rate is desired.
108
+
109
+ Example:
110
+ ```python3
111
+ import jiwer
112
+
113
+ jiwer.Compose([
114
+ jiwer.RemoveMultipleSpaces(),
115
+ jiwer.ReduceToListOfListOfWords()
116
+ ])
117
+ ```
118
+ """
119
+
120
+ def __init__(self, transforms: List[AbstractTransform]):
121
+ """
122
+
123
+ Args:
124
+ transforms: The list of transformations to chain.
125
+ """
126
+ self.transforms = transforms
127
+
128
+ def __call__(self, text):
129
+ for tr in self.transforms:
130
+ text = tr(text)
131
+
132
+ return text
133
+
134
+
135
+ class BaseRemoveTransform(AbstractTransform):
136
+ def __init__(self, tokens_to_remove: List[str], replace_token=""):
137
+ self.tokens_to_remove = tokens_to_remove
138
+ self.replace_token = replace_token
139
+
140
+ def process_string(self, s: str):
141
+ for w in self.tokens_to_remove:
142
+ s = s.replace(w, self.replace_token)
143
+
144
+ return s
145
+
146
+ def process_list(self, inp: List[str]):
147
+ return [self.process_string(s) for s in inp]
148
+
149
+
150
+ class ReduceToListOfListOfWords(AbstractTransform):
151
+ """
152
+ Transforms a single input sentence, or a list of input sentences, into
153
+ a list with lists of words, which is the expected format for calculating the
154
+ edit operations between two input sentences on a word-level.
155
+
156
+ A sentence is assumed to be a string, where words are delimited by a token
157
+ (such as ` `, space). Each string is expected to contain only a single sentence.
158
+ Empty strings (no output) are removed from the list.
159
+
160
+ Example:
161
+ ```python
162
+ import jiwer
163
+
164
+ sentences = ["hi", "this is an example"]
165
+
166
+ print(jiwer.ReduceToListOfListOfWords()(sentences))
167
+ # prints: [['hi'], ['this', 'is', 'an', 'example']]
168
+ ```
169
+ """
170
+
171
+ def __init__(self, word_delimiter: str = " "):
172
+ """
173
+ Args:
174
+ word_delimiter: the character which delimits words. Default is ` ` (space).
175
+ """
176
+ self.word_delimiter = word_delimiter
177
+
178
+ def process_string(self, s: str):
179
+ return [[w for w in s.split(self.word_delimiter) if len(w) >= 1]]
180
+
181
+ def process_list(self, inp: List[str]):
182
+ sentence_collection = []
183
+
184
+ for sentence in inp:
185
+ list_of_words = self.process_string(sentence)[0]
186
+
187
+ sentence_collection.append(list_of_words)
188
+
189
+ if len(sentence_collection) == 0:
190
+ return [[]]
191
+
192
+ return sentence_collection
193
+
194
+
195
+ class ReduceToListOfListOfChars(AbstractTransform):
196
+ """
197
+ Transforms a single input sentence, or a list of input sentences, into
198
+ a list with lists of characters, which is the expected format for calculating the
199
+ edit operations between two input sentences on a character-level.
200
+
201
+ A sentence is assumed to be a string. Each string is expected to contain only a
202
+ single sentence.
203
+
204
+ Example:
205
+ ```python
206
+ import jiwer
207
+
208
+ sentences = ["hi", "this is an example"]
209
+
210
+ print(jiwer.ReduceToListOfListOfChars()(sentences))
211
+ # prints: [['h', 'i'], ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', 'n', ' ', 'e', 'x', 'a', 'm', 'p', 'l', 'e']]
212
+ ```
213
+ """
214
+
215
+ def process_string(self, s: str):
216
+ return [[w for w in s]]
217
+
218
+ def process_list(self, inp: List[str]):
219
+ sentence_collection = []
220
+
221
+ for sentence in inp:
222
+ list_of_words = self.process_string(sentence)[0]
223
+
224
+ sentence_collection.append(list_of_words)
225
+
226
+ if len(sentence_collection) == 0:
227
+ return [[]]
228
+
229
+ return sentence_collection
230
+
231
+
232
+ class ReduceToSingleSentence(AbstractTransform):
233
+ """
234
+ Transforms multiple sentences into a single sentence.
235
+ This operation can be useful when the number of reference and hypothesis sentences
236
+ differ, and you want to do a minimal alignment over these lists.
237
+ Note that this breaks the invariance to sentence order: `wer([a, b], [x, y])` might not be equal to
238
+ `wer([b, a], [y, x])`.
239
+
240
+ Example:
241
+ ```python3
242
+ import jiwer
243
+
244
+ sentences = ["hi", "this is an example"]
245
+
246
+ print(jiwer.ReduceToSingleSentence()(sentences))
247
+ # prints: ['hi this is an example']
248
+ ```
249
+ """
250
+
251
+ def __init__(self, word_delimiter: str = " "):
252
+ """
253
+ :param word_delimiter: the character which delimits words. Default is ` ` (space).
254
+ """
255
+ self.word_delimiter = word_delimiter
256
+
257
+ def process_string(self, s: str):
258
+ return s
259
+
260
+ def process_list(self, inp: List[str]):
261
+ filtered_inp = [i for i in inp if len(i) >= 1]
262
+
263
+ if len(filtered_inp) == 0:
264
+ return []
265
+ else:
266
+ return ["{}".format(self.word_delimiter).join(filtered_inp)]
267
+
268
+
269
+ class SubstituteRegexes(AbstractTransform):
270
+ """
271
+ Transform strings by substituting substrings matching regex expressions into
272
+ another substring.
273
+
274
+ Example:
275
+ ```python
276
+ import jiwer
277
+
278
+ sentences = ["is the world doomed or loved?", "edibles are allegedly cultivated"]
279
+
280
+ # note: the regex string "\b(\w+)ed\b" matches every word ending in 'ed',
281
+ # and "\1" stands for the first group ('\w+'). It therefore removes 'ed' in every match.
282
+ print(jiwer.SubstituteRegexes({r"doom": r"sacr", r"\b(\w+)ed\b": r"\1"})(sentences))
283
+
284
+ # prints: ["is the world sacr or lov?", "edibles are allegedly cultivat"]
285
+ ```
286
+ """
287
+
288
+ def __init__(self, substitutions: Mapping[str, str]):
289
+ """
290
+
291
+ Args:
292
+ substitutions: a mapping of regex expressions to replacement strings.
293
+ """
294
+ self.substitutions = substitutions
295
+
296
+ def process_string(self, s: str):
297
+ for key, value in self.substitutions.items():
298
+ s = re.sub(key, value, s)
299
+
300
+ return s
301
+
302
+
303
+ class SubstituteWords(AbstractTransform):
304
+ """
305
+ This transform can be used to replace a word into another word.
306
+ Note that the whole word is matched. If the word you're attempting to substitute
307
+ is a substring of another word it will not be affected.
308
+ For example, if you're substituting `foo` into `bar`, the word `foobar` will NOT
309
+ be substituted into `barbar`.
310
+
311
+ Example:
312
+ ```python
313
+ import jiwer
314
+
315
+ sentences = ["you're pretty", "your book", "foobar"]
316
+
317
+ print(jiwer.SubstituteWords({"pretty": "awesome", "you": "i", "'re": " am", 'foo': 'bar'})(sentences))
318
+
319
+ # prints: ["i am awesome", "your book", "foobar"]
320
+ ```
321
+
322
+ """
323
+
324
+ def __init__(self, substitutions: Mapping[str, str]):
325
+ """
326
+ Args:
327
+ substitutions: A mapping of words to replacement words.
328
+ """
329
+ self.substitutions = substitutions
330
+
331
+ def process_string(self, s: str):
332
+ for key, value in self.substitutions.items():
333
+ s = re.sub(r"\b{}\b".format(re.escape(key)), value, s)
334
+
335
+ return s
336
+
337
+
338
+ class RemoveSpecificWords(SubstituteWords):
339
+ """
340
+ Can be used to filter out certain words.
341
+ As words are replaced with a ` ` character, make sure that
342
+ `RemoveMultipleSpaces`, `Strip()` and `RemoveEmptyStrings` are present
343
+ in the composition _after_ `RemoveSpecificWords`.
344
+
345
+ Example:
346
+ ```python
347
+ import jiwer
348
+
349
+ sentences = ["yhe awesome", "the apple is not a pear", "yhe"]
350
+
351
+ print(jiwer.RemoveSpecificWords(["yhe", "the", "a"])(sentences))
352
+ # prints: [' awesome', ' apple is not pear', ' ']
353
+ # note the extra spaces
354
+ ```
355
+ """
356
+
357
+ def __init__(self, words_to_remove: List[str]):
358
+ """
359
+ Args:
360
+ words_to_remove: List of words to remove.
361
+ """
362
+ mapping = {word: " " for word in words_to_remove}
363
+
364
+ super().__init__(mapping)
365
+
366
+
367
+ class RemoveWhiteSpace(BaseRemoveTransform):
368
+ """
369
+ This transform filters out white space characters.
370
+ Note that by default space (` `) is also removed, which will make it impossible to
371
+ split a sentence into a list of words by using `ReduceToListOfListOfWords` or
372
+ `ReduceToSingleSentence`.
373
+ This can be prevented by replacing all whitespace with the space character.
374
+ If so, make sure that `jiwer.RemoveMultipleSpaces`,
375
+ `Strip()` and `RemoveEmptyStrings` are present in the composition _after_
376
+ `RemoveWhiteSpace`.
377
+
378
+ Example:
379
+ ```python
380
+ import jiwer
381
+
382
+ sentences = ["this is an example", "hello world\t"]
383
+
384
+ print(jiwer.RemoveWhiteSpace()(sentences))
385
+ # prints: ["thisisanexample", "helloworld"]
386
+
387
+ print(jiwer.RemoveWhiteSpace(replace_by_space=True)(sentences))
388
+ # prints: ["this is an example", "hello world "]
389
+ # note the trailing spaces
390
+ ```
391
+ """
392
+
393
+ def __init__(self, replace_by_space: bool = False):
394
+ """
395
+
396
+ Args:
397
+ replace_by_space: every white space character is replaced with a space (` `)
398
+ """
399
+ characters = [c for c in string.whitespace]
400
+
401
+ if replace_by_space:
402
+ replace_token = " "
403
+ else:
404
+ replace_token = ""
405
+
406
+ super().__init__(characters, replace_token=replace_token)
407
+
408
+
409
+ @functools.lru_cache(1)
410
+ def _get_punctuation_characters():
411
+ """Compute the punctuation characters only once and memoize."""
412
+ codepoints = range(sys.maxunicode + 1)
413
+ punctuation = set(
414
+ chr(i) for i in codepoints if unicodedata.category(chr(i)).startswith("P")
415
+ )
416
+ return punctuation
417
+
418
+
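# Sketch of the category check used above: Unicode general categories starting with
# "P" are punctuation, e.g.
#
#     import unicodedata
#     print(unicodedata.category("!"), unicodedata.category("("), unicodedata.category("a"))
#     # prints: Po Ps Ll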
419
+ class RemovePunctuation(BaseRemoveTransform):
420
+ """
421
+ This transform filters out punctuation. The punctuation characters are defined as
422
+ all unicode characters whose category name starts with `P`.
423
+ See [here](https://www.unicode.org/reports/tr44/#General_Category_Values) for more
424
+ information.
425
+ Example:
426
+ ```python
427
+ import jiwer
428
+
429
+ sentences = ["this is an example!", "hello. goodbye"]
430
+
431
+ print(jiwer.RemovePunctuation()(sentences))
432
+ # prints: ['this is an example', "hello goodbye"]
433
+ ```
434
+ """
435
+
436
+ def __init__(self):
437
+ punctuation_characters = _get_punctuation_characters()
438
+ super().__init__(punctuation_characters)
439
+
440
+
441
+ class RemoveMultipleSpaces(AbstractTransform):
442
+ """
443
+ Filter out multiple spaces between words.
444
+
445
+ Example:
446
+ ```python
447
+ import jiwer
448
+
449
+ sentences = ["this is an example ", " hello goodbye ", " "]
450
+
451
+ print(jiwer.RemoveMultipleSpaces()(sentences))
452
+ # prints: ['this is an example ', " hello goodbye ", " "]
453
+ # note that there are still trailing spaces
454
+ ```
455
+
456
+ """
457
+
458
+ def process_string(self, s: str):
459
+ return re.sub(r"\s\s+", " ", s)
460
+
461
+ def process_list(self, inp: List[str]):
462
+ return [self.process_string(s) for s in inp]
463
+
464
+
465
+ class Strip(AbstractTransform):
466
+ """
467
+ Removes all leading and trailing spaces.
468
+
469
+ Example:
470
+ ```python
471
+ import jiwer
472
+
473
+ sentences = [" this is an example ", " hello goodbye ", " "]
474
+
475
+ print(jiwer.Strip()(sentences))
476
+ # prints: ['this is an example', "hello goodbye", ""]
477
+ # note that there is an empty string left behind which might need to be cleaned up
478
+ ```
479
+ """
480
+
481
+ def process_string(self, s: str):
482
+ return s.strip()
483
+
484
+
485
+ class RemoveEmptyStrings(AbstractTransform):
486
+ """
487
+ Remove empty strings from a list of strings.
488
+
489
+ Example:
490
+ ```python
491
+ import jiwer
492
+
493
+ sentences = ["", "this is an example", " ", " "]
494
+
495
+ print(jiwer.RemoveEmptyStrings()(sentences))
496
+ # prints: ['this is an example']
497
+ ```
498
+ """
499
+
500
+ def process_string(self, s: str):
501
+ return s.strip()
502
+
503
+ def process_list(self, inp: List[str]):
504
+ return [s for s in inp if self.process_string(s) != ""]
505
+
506
+
507
+ class ExpandCommonEnglishContractions(AbstractTransform):
508
+ """
509
+ Replace common contractions such as `let's` to `let us`.
510
+
511
+ Currently, this method will perform the following replacements. Note that `␣` is
512
+ used to indicate a space (` `) to get around markdown rendering constraints.
513
+
514
+ | Contraction | transformed into |
515
+ | ------------- |:----------------:|
516
+ | `won't` | `␣will not` |
517
+ | `can't` | `␣can not` |
518
+ | `let's` | `␣let us` |
519
+ | `n't` | `␣not` |
520
+ | `'re` | `␣are` |
521
+ | `'s` | `␣is` |
522
+ | `'d` | `␣would` |
523
+ | `'ll` | `␣will` |
524
+ | `'t` | `␣not` |
525
+ | `'ve` | `␣have` |
526
+ | `'m` | `␣am` |
527
+
528
+ Example:
529
+ ```python
530
+ import jiwer
531
+
532
+ sentences = ["she'll make sure you can't make it", "let's party!"]
533
+
534
+ print(jiwer.ExpandCommonEnglishContractions()(sentences))
535
+ # prints: ["she will make sure you can not make it", "let us party!"]
536
+ ```
537
+
538
+ """
539
+
540
+ def process_string(self, s: str):
541
+ # definitely a non exhaustive list
542
+
543
+ # specific words
544
+ s = re.sub(r"won't", "will not", s)
545
+ s = re.sub(r"can\'t", "can not", s)
546
+ s = re.sub(r"let\'s", "let us", s)
547
+
548
+ # general attachments
549
+ s = re.sub(r"n\'t", " not", s)
550
+ s = re.sub(r"\'re", " are", s)
551
+ s = re.sub(r"\'s", " is", s)
552
+ s = re.sub(r"\'d", " would", s)
553
+ s = re.sub(r"\'ll", " will", s)
554
+ s = re.sub(r"\'t", " not", s)
555
+ s = re.sub(r"\'ve", " have", s)
556
+ s = re.sub(r"\'m", " am", s)
557
+
558
+ return s
559
+
560
+
561
+ class ToLowerCase(AbstractTransform):
562
+ """
563
+ Convert every character into lowercase.
564
+ Example:
565
+ ```python
566
+ import jiwer
567
+
568
+ sentences = ["You're PRETTY"]
569
+
570
+ print(jiwer.ToLowerCase()(sentences))
571
+
572
+ # prints: ["you're pretty"]
573
+ ```
574
+ """
575
+
576
+ def process_string(self, s: str):
577
+ return s.lower()
578
+
579
+
580
+ class ToUpperCase(AbstractTransform):
581
+ """
582
+ Convert every character to uppercase.
583
+
584
+ Example:
585
+ ```python
586
+ import jiwer
587
+
588
+ sentences = ["You're amazing"]
589
+
590
+ print(jiwer.ToUpperCase()(sentences))
591
+
592
+ # prints: ["YOU'RE AMAZING"]
593
+ ```
594
+ """
595
+
596
+ def process_string(self, s: str):
597
+ return s.upper()
598
+
599
+
600
+ class RemoveKaldiNonWords(AbstractTransform):
601
+ """
602
+ Remove any word between `[]` and `<>`. This can be useful when working
603
+ with hypotheses from the Kaldi project, which can output non-words such as
604
+ `[laugh]` and `<unk>`.
605
+
606
+ Example:
607
+ ```python
608
+ import jiwer
609
+
610
+ sentences = ["you <unk> like [laugh]"]
611
+
612
+ print(jiwer.RemoveKaldiNonWords()(sentences))
613
+
614
+ # prints: ["you like "]
615
+ # note the extra spaces
616
+ ```
617
+ """
618
+
619
+ def process_string(self, s: str):
620
+ return re.sub(r"[<\[][^>\]]*[>\]]", "", s)
jiwer/mkdocs.yml ADDED
@@ -0,0 +1,47 @@
1
+ site_name: "jiwer"
2
+ site_description: "Documentation for jiwer."
3
+ site_url: "https://jitsi.github.io/jiwer/"
4
+ repo_url: "https://github.com/jitsi/jiwer"
5
+ edit_uri: "blob/master/docs/"
6
+ repo_name: "jitsi/jiwer"
7
+ site_dir: "site"
8
+
9
+ theme:
10
+ name: "material"
11
+ features:
12
+ - content.code.annotate
13
+ - navigation.tabs
14
+ - navigation.tabs.sticky
15
+ - navigation.top
16
+
17
+ plugins:
18
+ - mkdocstrings:
19
+ handlers:
20
+ python:
21
+ paths: [jiwer]
22
+ options:
23
+ separate_signature: true
24
+ show_root_heading: true
25
+ show_root_members_full_path: false
26
+ - search
27
+ - gen-files:
28
+ scripts:
29
+ - docs/gen_ref_pages.py
30
+ - literate-nav:
31
+ nav_file: SUMMARY.md
32
+ - autorefs
33
+
34
+ nav:
35
+ - jiwer: index.md
36
+ - Usage: usage.md
37
+ - Command-line interface: cli.md
38
+ - API reference: reference/
39
+
40
+ markdown_extensions:
41
+ - toc:
42
+ permalink: "#"
43
+ - pymdownx.superfences
44
+
45
+ watch:
46
+ - docs
47
+ - jiwer
jiwer/pyproject.toml ADDED
@@ -0,0 +1,27 @@
1
+ [tool.poetry]
2
+ name = "jiwer"
3
+ version = "3.0.2"
4
+ description = "Evaluate your speech-to-text system with similarity measures such as word error rate (WER)"
5
+ authors = ["Nik Vaessen <[email protected]>"]
6
+ readme = "README.md"
7
+ license = "Apache-2.0"
8
+ repository = "https://github.com/jitsi/jiwer"
9
+ include = ["LICENCE"]
10
+
11
+ [tool.poetry.dependencies]
12
+ python = "^3.7"
13
+ rapidfuzz = "2.13.7"
14
+ click = "^8.1.3"
15
+
16
+ [tool.poetry.group.dev.dependencies]
17
+ black = "^22.8.0"
18
+ pytest = "7.1.3"
19
+ pytest-benchmark = "^3.4.1"
20
+ flake8 = "5.0.4"
21
+
22
+ [tool.poetry.scripts]
23
+ jiwer = "jiwer.cli:cli"
24
+
25
+ [build-system]
26
+ requires = ["poetry-core>=1.0.0"]
27
+ build-backend = "poetry.core.masonry.api"
jiwer/tests/__init__.py ADDED
File without changes
jiwer/tests/test_alignment.py ADDED
@@ -0,0 +1,225 @@
1
+ import unittest
2
+ import jiwer
3
+
4
+
5
+ class TestAlignmentVisualizationWords(unittest.TestCase):
6
+ def test_insertion(self):
7
+ correct_alignment = (
8
+ "sentence 1\n"
9
+ "REF: this is a ****\n"
10
+ "HYP: this is a test\n"
11
+ " I\n"
12
+ )
13
+ alignment = jiwer.visualize_alignment(
14
+ jiwer.process_words("this is a", "this is a test"), show_measures=False
15
+ )
16
+ self.assertEqual(alignment, correct_alignment)
17
+
18
+ pass
19
+
20
+ def test_deletion(self):
21
+ correct_alignment = (
22
+ "sentence 1\n"
23
+ "REF: this is a test\n"
24
+ "HYP: this is a ****\n"
25
+ " D\n"
26
+ )
27
+ alignment = jiwer.visualize_alignment(
28
+ jiwer.process_words("this is a test", "this is a"), show_measures=False
29
+ )
30
+ self.assertEqual(alignment, correct_alignment)
31
+
32
+ def test_substitution(self):
33
+ correct_alignment = (
34
+ "sentence 1\n"
35
+ "REF: this is a test\n"
36
+ "HYP: this was a test\n"
37
+ " S \n"
38
+ )
39
+ alignment = jiwer.visualize_alignment(
40
+ jiwer.process_words("this is a test", "this was a test"),
41
+ show_measures=False,
42
+ )
43
+ self.assertEqual(alignment, correct_alignment)
44
+
45
+ def test_all_three(self):
46
+ correct_alignment = (
47
+ "sentence 1\n"
48
+ "REF: this is a ***** test of skill\n"
49
+ "HYP: this was a messy test ** *****\n"
50
+ " S I D D\n"
51
+ )
52
+ alignment = jiwer.visualize_alignment(
53
+ jiwer.process_words("this is a test of skill", "this was a messy test"),
54
+ show_measures=False,
55
+ )
56
+ self.assertEqual(alignment, correct_alignment)
57
+
58
+ def test_show_measures(self):
59
+ correct_alignment = (
60
+ "sentence 1\n"
61
+ "REF: this test will have a high word error rate\n"
62
+ "HYP: no it will not * **** **** ***** ****\n"
63
+ " S S S D D D D D\n"
64
+ "\n"
65
+ "number of sentences: 1\n"
66
+ "substitutions=3 deletions=5 insertions=0 hits=1\n"
67
+ "\n"
68
+ "mer=88.89%\n"
69
+ "wil=97.22%\n"
70
+ "wip=2.78%\n"
71
+ "wer=88.89%\n"
72
+ )
73
+ alignment = jiwer.visualize_alignment(
74
+ jiwer.process_words(
75
+ "this test will have a high word error rate", "no it will not"
76
+ ),
77
+ show_measures=True,
78
+ )
79
+ self.assertEqual(alignment, correct_alignment)
80
+
81
+ def test_empty_hypothesis(self):
82
+ correct_alignment = "sentence 1\n" "REF: empty\n" "HYP: *****\n" " D\n"
83
+ alignment = jiwer.visualize_alignment(
84
+ jiwer.process_words("empty", ""), show_measures=False
85
+ )
86
+ self.assertEqual(alignment, correct_alignment)
87
+
88
+ def test_multiple_sentences(self):
89
+ correct_alignment = (
90
+ "sentence 1\n"
91
+ "REF: one\n"
92
+ "HYP: 1\n"
93
+ " S\n"
94
+ "\n"
95
+ "sentence 2\n"
96
+ "REF: two\n"
97
+ "HYP: 2\n"
98
+ " S\n"
99
+ )
100
+ alignment = jiwer.visualize_alignment(
101
+ jiwer.process_words(["one", "two"], ["1", "2"]),
102
+ show_measures=False,
103
+ )
104
+ self.assertEqual(alignment, correct_alignment)
105
+
106
+ def test_skip_correct(self):
107
+ correct_alignment = (
108
+ "sentence 2\n"
109
+ "REF: one\n"
110
+ "HYP: 1\n"
111
+ " S\n"
112
+ "\n"
113
+ "sentence 3\n"
114
+ "REF: two\n"
115
+ "HYP: 2\n"
116
+ " S\n"
117
+ )
118
+ alignment = jiwer.visualize_alignment(
119
+ jiwer.process_words(
120
+ ["perfect", "one", "two", "three"], ["perfect", "1", "2", "three"]
121
+ ),
122
+ show_measures=False,
123
+ )
124
+ self.assertEqual(alignment, correct_alignment)
125
+
126
+
127
+ class TestAlignmentVisualizationCharacters(unittest.TestCase):
128
+ def test_insertion(self):
129
+ correct_alignment = (
130
+ "sentence 1\n"
131
+ "REF: this is a*****\n"
132
+ "HYP: this is a test\n"
133
+ " IIIII\n"
134
+ )
135
+ alignment = jiwer.visualize_alignment(
136
+ jiwer.process_characters("this is a", "this is a test"), show_measures=False
137
+ )
138
+ self.assertEqual(alignment, correct_alignment)
139
+
140
+ pass
141
+
142
+ def test_deletion(self):
143
+ correct_alignment = (
144
+ "sentence 1\n"
145
+ "REF: this is a test\n"
146
+ "HYP: this is a*****\n"
147
+ " DDDDD\n"
148
+ )
149
+ alignment = jiwer.visualize_alignment(
150
+ jiwer.process_characters("this is a test", "this is a"), show_measures=False
151
+ )
152
+ self.assertEqual(alignment, correct_alignment)
153
+
154
+ def test_substitution(self):
155
+ correct_alignment = (
156
+ "sentence 1\n"
157
+ "REF: this is a test\n"
158
+ "HYP: this iz a test\n"
159
+ " S \n"
160
+ )
161
+ alignment = jiwer.visualize_alignment(
162
+ jiwer.process_characters("this is a test", "this iz a test"),
163
+ show_measures=False,
164
+ )
165
+ self.assertEqual(alignment, correct_alignment)
166
+
167
+ def test_all_three(self):
168
+ correct_alignment = (
169
+ "sentence 1\n"
170
+ "REF: this *is a tes*t of skill\n"
171
+ "HYP: this was a messy te*st***\n"
172
+ " IS S IS SSD SDDD\n"
173
+ )
174
+ alignment = jiwer.visualize_alignment(
175
+ jiwer.process_characters(
176
+ "this is a test of skill", "this was a messy test"
177
+ ),
178
+ show_measures=False,
179
+ )
180
+ self.assertEqual(alignment, correct_alignment)
181
+
182
+ def test_show_measures(self):
183
+ correct_alignment = (
184
+ "sentence 1\n"
185
+ "REF: this test will have a high word error rate\n"
186
+ "HYP: no** i**t will n*************o***********t*\n"
187
+ " SSDD SDD SDDDDDDDDDDDDD DDDDDDDDDDD D\n"
188
+ "\n"
189
+ "number of sentences: 1\n"
190
+ "substitutions=4 deletions=29 insertions=0 hits=10\n"
191
+ "\n"
192
+ "cer=76.74%\n"
193
+ )
194
+ alignment = jiwer.visualize_alignment(
195
+ jiwer.process_characters(
196
+ "this test will have a high word error rate", "no it will not"
197
+ ),
198
+ show_measures=True,
199
+ )
200
+ self.assertEqual(alignment, correct_alignment)
201
+
202
+ def test_empty_hypothesis(self):
203
+ correct_alignment = "sentence 1\n" "REF: empty\n" "HYP: *****\n" " DDDDD\n"
204
+ alignment = jiwer.visualize_alignment(
205
+ jiwer.process_characters("empty", ""), show_measures=False
206
+ )
207
+ self.assertEqual(alignment, correct_alignment)
208
+
209
+ def test_multiple_sentences(self):
210
+ correct_alignment = (
211
+ "sentence 1\n"
212
+ "REF: one\n"
213
+ "HYP: 1**\n"
214
+ " SDD\n"
215
+ "\n"
216
+ "sentence 2\n"
217
+ "REF: two\n"
218
+ "HYP: 2**\n"
219
+ " SDD\n"
220
+ )
221
+ alignment = jiwer.visualize_alignment(
222
+ jiwer.process_characters(["one", "two"], ["1", "2"]),
223
+ show_measures=False,
224
+ )
225
+ self.assertEqual(alignment, correct_alignment)
jiwer/tests/test_cer.py ADDED
@@ -0,0 +1,135 @@
1
+ import unittest
2
+ import pytest
3
+
4
+ import jiwer
5
+
6
+ from .test_measures import assert_dict_almost_equal
7
+
8
+
9
+ class TestCERInputMethods(unittest.TestCase):
10
+ def test_input_ref_string_hyp_string(self):
11
+ cases = [
12
+ ("This is a test", "This is a test", 0 / 14),
13
+ ("This is a test", "", 14 / 14),
14
+ ("This is a test", "This test", 5 / 14),
15
+ ]
16
+
17
+ self._apply_test_on(cases)
18
+
19
+ def test_input_ref_string_hyp_list(self):
20
+ cases = [
21
+ ("This is a test", ["This is a test"], 0 / 14),
22
+ ("This is a test", [""], 14 / 14),
23
+ ("This is a test", ["This test"], 5 / 14),
24
+ ]
25
+
26
+ self._apply_test_on(cases)
27
+
28
+ def test_input_ref_list_hyp_string(self):
29
+ cases = [
30
+ (["This is a test"], "This is a test", 0 / 14),
31
+ (["This is a test"], "", 14 / 14),
32
+ (["This is a test"], "This test", 5 / 14),
33
+ ]
34
+
35
+ self._apply_test_on(cases)
36
+
37
+ def test_input_ref_list_hyp_list(self):
38
+ cases = [
39
+ (["This is a test"], ["This is a test"], 0 / 14),
40
+ (["This is a test"], [""], 14 / 14),
41
+ (["This is a test"], ["This test"], 5 / 14),
42
+ ]
43
+
44
+ self._apply_test_on(cases)
45
+
46
+ def test_fail_on_different_sentence_length(self):
47
+ def callback():
48
+ jiwer.cer(["hello", "this", "sentence", "is fractured"], ["this sentence"])
49
+
50
+ self.assertRaises(ValueError, callback)
51
+
52
+ def test_fail_on_empty_reference(self):
53
+ def callback():
54
+ jiwer.cer("", "test")
55
+
56
+ self.assertRaises(ValueError, callback)
57
+
58
+ def test_known_values(self):
59
+ # Taken from the "From WER and RIL to MER and WIL" paper, for link see README.md
60
+ cases = [
61
+ (
62
+ "X",
63
+ "X",
64
+ 0,
65
+ ),
66
+ (
67
+ "X",
68
+ "X X Y Y",
69
+ 6,
70
+ ),
71
+ (
72
+ "X Y X",
73
+ "X Z",
74
+ 3 / 5,
75
+ ),
76
+ (
77
+ "X",
78
+ "Y",
79
+ 1,
80
+ ),
81
+ (
82
+ "X",
83
+ "Y Z",
84
+ 3,
85
+ ),
86
+ ]
87
+
88
+ self._apply_test_on(cases)
89
+
90
+ def test_permutations_invariance(self):
91
+ cases = [
92
+ (
93
+ ["i", "am i good"],
94
+ ["i am", "i good"],
95
+ 0.6,
96
+ ),
97
+ (
98
+ ["am i good", "i"],
99
+ [
100
+ "i good",
101
+ "i am",
102
+ ],
103
+ 0.6,
104
+ ),
105
+ ]
106
+
107
+ self._apply_test_on(cases)
108
+
109
+ def test_return_dict(self):
110
+ # TODO: remove unit test once deprecated
111
+ with pytest.deprecated_call():
112
+ return_dict = jiwer.cer(
113
+ ["i", "am i good"], ["i am", "y good"], return_dict=True
114
+ )
115
+
116
+ assert_dict_almost_equal(
117
+ self,
118
+ return_dict,
119
+ {
120
+ "cer": 0.7,
121
+ "hits": 6,
122
+ "substitutions": 1,
123
+ "deletions": 3,
124
+ "insertions": 3,
125
+ },
126
+ delta=1e-16,
127
+ )
128
+
129
+ def _apply_test_on(self, cases):
130
+ for ref, hyp, correct_cer in cases:
131
+ cer = jiwer.cer(reference=ref, hypothesis=hyp)
132
+
133
+ self.assertTrue(isinstance(cer, float))
134
+ if isinstance(cer, float):
135
+ self.assertAlmostEqual(cer, correct_cer, delta=1e-16)
jiwer/tests/test_measures.py ADDED
@@ -0,0 +1,410 @@
1
+ import unittest
2
+
3
+ import pytest
4
+
5
+ import jiwer
6
+
7
+
8
+ def all_m(wer, mer, wil):
9
+ return {
10
+ "wer": wer,
11
+ "mer": mer,
12
+ "wip": 1 - wil,
13
+ "wil": wil,
14
+ }
15
+
16
+
17
+ def to_measure_dict(x: jiwer.WordOutput):
18
+ return {"wer": x.wer, "mer": x.mer, "wip": x.wip, "wil": x.wil}
19
+
20
+
21
+ def assert_dict_almost_equal(
22
+ test_case: unittest.TestCase, a, b, places=None, msg=None, delta=None
23
+ ):
24
+ test_case.assertIsInstance(a, dict)
25
+ test_case.assertIsInstance(b, dict)
26
+ test_case.assertEqual(set(a.keys()), set(b.keys()))
27
+
28
+ for k in a.keys():
29
+ test_case.assertAlmostEqual(a[k], b[k], places=places, msg=msg, delta=delta)
30
+
31
+
32
+ class TestMeasuresContiguousSentencesTransform(unittest.TestCase):
33
+ def test_input_ref_string_hyp_string(self):
34
+ cases = [
35
+ ("This is a test", "This is a test", all_m(0, 0, 0)),
36
+ ("This is a test", "", all_m(1, 1, 1)),
37
+ ("This is a test", "This test", all_m(0.5, 0.5, 0.5)),
38
+ ]
39
+
40
+ self._apply_test_on(cases)
41
+
42
+ def test_input_ref_string_hyp_list(self):
43
+ cases = [
44
+ ("This is a test", ["This is a test"], all_m(0, 0, 0)),
45
+ ("This is a test", [""], all_m(1, 1, 1)),
46
+ ("This is a test", ["This test"], all_m(0.5, 0.5, 0.5)),
47
+ ]
48
+
49
+ self._apply_test_on(cases)
50
+
51
+ def test_input_ref_list_hyp_string(self):
52
+ cases = [
53
+ (["This is a test"], "This is a test", all_m(0, 0, 0)),
54
+ (["This is a test"], "", all_m(1, 1, 1)),
55
+ (["This is a test"], "This test", all_m(0.5, 0.5, 0.5)),
56
+ ]
57
+
58
+ self._apply_test_on(cases)
59
+
60
+ def test_input_ref_list_hyp_list(self):
61
+ cases = [
62
+ (["This is a test"], ["This is a test"], all_m(0, 0, 0)),
63
+ (["This is a test"], [""], all_m(1, 1, 1)),
64
+ (["This is a test"], ["This test"], all_m(0.5, 0.5, 0.5)),
65
+ ]
66
+
67
+ self._apply_test_on(cases)
68
+
69
+ def test_different_sentence_length_equal_type(self):
70
+ cases = [
71
+ (
72
+ ["hello", "this", "sentence", "is fractured"],
73
+ ["this sentence"],
74
+ all_m(0.6, 0.6, 0.6),
75
+ ),
76
+ (
77
+ "i am a short ground truth",
78
+ "i am a considerably longer and very much incorrect hypothesis",
79
+ all_m(7 / 6, 0.7, 0.85),
80
+ ),
81
+ ]
82
+
83
+ self._apply_test_on(cases)
84
+
85
+ def test_different_sentence_length_unequal_type(self):
86
+ reference = [
87
+ "i like monthy python",
88
+ "what do you mean african or european swallow",
89
+ ]
90
+ hypothesis = ["i like", "python", "what you mean", "or swallow"]
91
+ x = jiwer.process_words(
92
+ reference,
93
+ hypothesis,
94
+ reference_transform=jiwer.transformations.wer_contiguous,
95
+ hypothesis_transform=jiwer.transformations.wer_contiguous,
96
+ )
97
+ x_dict = to_measure_dict(x)
98
+
99
+ # is equivalent to
100
+
101
+ reference = "i like monthy python what do you mean african or european swallow"
102
+ hypothesis = "i like python what you mean or swallow"
103
+ y = jiwer.process_words(
104
+ reference,
105
+ hypothesis,
106
+ reference_transform=jiwer.transformations.wer_contiguous,
107
+ hypothesis_transform=jiwer.transformations.wer_contiguous,
108
+ )
109
+ y_dict = to_measure_dict(y)
110
+
111
+ assert_dict_almost_equal(self, x_dict, y_dict, delta=1e-9)
112
+
113
+ def test_fail_on_empty_reference(self):
114
+ for method in [
115
+ jiwer.wer,
116
+ jiwer.wil,
117
+ jiwer.wip,
118
+ jiwer.mer,
119
+ jiwer.compute_measures,
120
+ ]:
121
+
122
+ def callback():
123
+ method("", "test")
124
+
125
+ self.assertRaises(ValueError, callback)
126
+
127
+ def test_known_values(self):
128
+ # Taken from the "From WER and RIL to MER and WIL" paper, for link see README.md
129
+ cases = [
130
+ (
131
+ "X",
132
+ "X",
133
+ all_m(0, 0, 0),
134
+ ),
135
+ (
136
+ "X",
137
+ "X X Y Y",
138
+ all_m(3, 0.75, 0.75),
139
+ ),
140
+ (
141
+ "X Y X",
142
+ "X Z",
143
+ all_m(2 / 3, 2 / 3, 5 / 6),
144
+ ),
145
+ (
146
+ "X",
147
+ "Y",
148
+ all_m(1, 1, 1),
149
+ ),
150
+ (
151
+ "X",
152
+ "Y Z",
153
+ all_m(2, 1, 1),
154
+ ),
155
+ ]
156
+
157
+ self._apply_test_on(cases)
158
+
159
+ def test_permutations_variance(self):
160
+ cases = [
161
+ (
162
+ ["i", "am i good"],
163
+ ["i am", "i good"],
164
+ all_m(0.0, 0.0, 0),
165
+ ),
166
+ (
167
+ ["am i good", "i"],
168
+ [
169
+ "i good",
170
+ "i am",
171
+ ],
172
+ all_m(0.5, 0.4, 7 / 16),
173
+ ),
174
+ ]
175
+
176
+ self._apply_test_on(cases)
177
+
178
+ def _apply_test_on(self, cases):
179
+ for ref, hyp, correct_measures in cases:
180
+ output = jiwer.process_words(
181
+ reference=ref,
182
+ hypothesis=hyp,
183
+ reference_transform=jiwer.transformations.wer_contiguous,
184
+ hypothesis_transform=jiwer.transformations.wer_contiguous,
185
+ )
186
+ output_dict = to_measure_dict(output)
187
+
188
+ assert_dict_almost_equal(self, output_dict, correct_measures, delta=1e-16)
189
+
190
+
191
+ class TestMeasuresDefaultTransform(unittest.TestCase):
192
+ def test_input_gt_string_h_string(self):
193
+ cases = [
194
+ ("This is a test", "This is a test", all_m(0, 0, 0)),
195
+ ("This is a test", "", all_m(1, 1, 1)),
196
+ ("This is a test", "This test", all_m(0.5, 0.5, 0.5)),
197
+ ]
198
+
199
+ self._apply_test_on(cases)
200
+
201
+ def test_input_gt_string_h_list(self):
202
+ cases = [
203
+ ("This is a test", ["This is a test"], all_m(0, 0, 0)),
204
+ ("This is a test", [""], all_m(1, 1, 1)),
205
+ ("This is a test", ["This test"], all_m(0.5, 0.5, 0.5)),
206
+ ]
207
+
208
+ self._apply_test_on(cases)
209
+
210
+ def test_input_gt_list_h_string(self):
211
+ cases = [
212
+ (["This is a test"], "This is a test", all_m(0, 0, 0)),
213
+ (["This is a test"], "", all_m(1, 1, 1)),
214
+ (["This is a test"], "This test", all_m(0.5, 0.5, 0.5)),
215
+ ]
216
+
217
+ self._apply_test_on(cases)
218
+
219
+ def test_input_gt_list_h_list(self):
220
+ cases = [
221
+ (["This is a test"], ["This is a test"], all_m(0, 0, 0)),
222
+ (["This is a test"], [""], all_m(1, 1, 1)),
223
+ (["This is a test"], ["This test"], all_m(0.5, 0.5, 0.5)),
224
+ ]
225
+
226
+ self._apply_test_on(cases)
227
+
228
+ def test_fail_on_different_sentence_length(self):
229
+ for method in [
230
+ jiwer.process_words,
231
+ jiwer.wer,
232
+ jiwer.wil,
233
+ jiwer.wip,
234
+ jiwer.mer,
235
+ jiwer.compute_measures,
236
+ ]:
237
+
238
+ def callback():
239
+ method(["hello", "this", "sentence", "is fractured"], ["this sentence"])
240
+
241
+ self.assertRaises(ValueError, callback)
242
+
243
+ def test_fail_on_empty_reference(self):
244
+ for method in [
245
+ jiwer.process_words,
246
+ jiwer.wer,
247
+ jiwer.wil,
248
+ jiwer.wip,
249
+ jiwer.mer,
250
+ jiwer.compute_measures,
251
+ ]:
252
+
253
+ def callback():
254
+ method("", "test")
255
+
256
+ self.assertRaises(ValueError, callback)
257
+
258
+ def test_known_values(self):
259
+ # Taken from the "From WER and RIL to MER and WIL" paper, for link see README.md
260
+ cases = [
261
+ (
262
+ "X",
263
+ "X",
264
+ all_m(0, 0, 0),
265
+ ),
266
+ (
267
+ "X",
268
+ "X X Y Y",
269
+ all_m(3, 0.75, 0.75),
270
+ ),
271
+ (
272
+ "X Y X",
273
+ "X Z",
274
+ all_m(2 / 3, 2 / 3, 5 / 6),
275
+ ),
276
+ (
277
+ "X",
278
+ "Y",
279
+ all_m(1, 1, 1),
280
+ ),
281
+ (
282
+ "X",
283
+ "Y Z",
284
+ all_m(2, 1, 1),
285
+ ),
286
+ ]
287
+
288
+ self._apply_test_on(cases)
289
+
290
+ def test_permutations_invariance(self):
291
+ cases = [
292
+ (
293
+ ["i", "am i good"],
294
+ ["i am", "i good"],
295
+ all_m(0.5, 0.4, 7 / 16),
296
+ ),
297
+ (
298
+ ["am i good", "i"],
299
+ [
300
+ "i good",
301
+ "i am",
302
+ ],
303
+ all_m(0.5, 0.4, 7 / 16),
304
+ ),
305
+ ]
306
+
307
+ self._apply_test_on(cases)
308
+
309
+ def _apply_test_on(self, cases):
310
+ for ref, hyp, correct_measures in cases:
311
+ output = jiwer.process_words(reference=ref, hypothesis=hyp)
312
+ output_dict = to_measure_dict(output)
313
+
314
+ assert_dict_almost_equal(self, output_dict, correct_measures, delta=1e-16)
315
+
316
+ with pytest.deprecated_call():
317
+ output = jiwer.compute_measures(truth=ref, hypothesis=hyp)
318
+ output_dict = {
319
+ "wer": output["wer"],
320
+ "mer": output["mer"],
321
+ "wil": output["wil"],
322
+ "wip": output["wip"],
323
+ }
324
+ assert_dict_almost_equal(
325
+ self, output_dict, correct_measures, delta=1e-16
326
+ )
327
+
328
+ self._apply_test_deprecated_truth(cases)
329
+
330
+ def _apply_test_deprecated_truth(self, cases):
331
+ with pytest.deprecated_call():
332
+ for ref, hyp, correct_measures in cases:
333
+ output_dict = {}
334
+ for key, method in [
335
+ ("wer", jiwer.wer),
336
+ ("mer", jiwer.mer),
337
+ ("wil", jiwer.wil),
338
+ ("wip", jiwer.wip),
339
+ ]:
340
+ output = method(truth=ref, hypothesis=hyp)
341
+ output_dict[key] = output
342
+
343
+ assert_dict_almost_equal(
344
+ self, output_dict, correct_measures, delta=1e-16
345
+ )
346
+
347
+ def test_deprecated_truth_and_ref(self):
348
+ for key, method in [
349
+ ("wer", jiwer.wer),
350
+ ("mer", jiwer.mer),
351
+ ("wil", jiwer.wil),
352
+ ("wip", jiwer.wip),
353
+ ("cer", jiwer.cer),
354
+ ]:
355
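+ # note: a pytest.raises block exits at the first ValueError, so only the
+ # first call below is actually executed; the remaining calls list further
+ # invalid argument combinations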
+ with pytest.raises(ValueError):
356
+ method(truth="ref", reference="truth", hypothesis="huh")
357
+ method()
358
+ method(truth="only truth")
359
+ method(reference="only ref")
360
+ method(hypothesis="only hypothesis")
361
+
362
+ def test_deprecated_truth_and_ref_with_transform(self):
363
+ wer_transform = jiwer.Compose(
364
+ [
365
+ jiwer.ToLowerCase(),
366
+ jiwer.RemoveMultipleSpaces(),
367
+ jiwer.Strip(),
368
+ jiwer.ReduceToListOfListOfWords(),
369
+ ]
370
+ )
371
+ cer_transform = jiwer.Compose(
372
+ [
373
+ jiwer.ToLowerCase(),
374
+ jiwer.RemoveMultipleSpaces(),
375
+ jiwer.Strip(),
376
+ jiwer.ReduceToListOfListOfChars(),
377
+ ]
378
+ )
379
+
380
+ for key, method in [
381
+ ("wer", jiwer.wer),
382
+ ("mer", jiwer.mer),
383
+ ("wil", jiwer.wil),
384
+ ("wip", jiwer.wip),
385
+ ("cer", jiwer.cer),
386
+ ]:
387
+ if key == "cer":
388
+ tr = cer_transform
389
+ else:
390
+ tr = wer_transform
391
+
392
+ result = method(
393
+ truth="This is a short Sentence with a few Words with upper and Lower cases",
394
+ hypothesis="His is a short Sentence with a few Words with upper and Lower cases",
395
+ truth_transform=tr,
396
+ hypothesis_transform=tr,
397
+ )
398
+ result_same = method(
399
+ reference="This is a short Sentence with a few Words with upper and Lower cases",
400
+ hypothesis="His is a short Sentence with a few Words with upper and Lower cases",
401
+ reference_transform=tr,
402
+ hypothesis_transform=tr,
403
+ )
404
+ self.assertAlmostEqual(result, result_same)
405
+
406
+
407
+ def test_deprecate_compute_measures():
408
+ # TODO: remove once deprecated
409
+ with pytest.deprecated_call():
410
+ jiwer.compute_measures("no more", "compute_measures")
jiwer/tests/test_speed.py ADDED
@@ -0,0 +1,24 @@
1
+ from jiwer import wer
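+ # the `benchmark` fixture used below is provided by the pytest-benchmark plugin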
2
+
3
+
4
+ def perform_computation(num_sentences):
5
+ truth = ["this is a speed test" for _ in range(0, num_sentences)]
6
+ hypo = ["this is not a speed test" for _ in range(0, num_sentences)]
7
+
8
+ wer(truth, hypo)
9
+
10
+
11
+ def test_speed_n1(benchmark):
12
+ benchmark(perform_computation, 1)
13
+
14
+
15
+ def test_speed_n10(benchmark):
16
+ benchmark(perform_computation, 10)
17
+
18
+
19
+ def test_speed_n100(benchmark):
20
+ benchmark(perform_computation, 100)
21
+
22
+
23
+ def test_speed_n1000(benchmark):
24
+ benchmark(perform_computation, 1000)
jiwer/tests/test_transforms.py ADDED
@@ -0,0 +1,337 @@
1
+ import unittest
2
+
3
+ from jiwer.transforms import *
4
+ from jiwer.transforms import ReduceToListOfListOfChars
5
+
6
+
7
+ def _apply_test_on(self: unittest.TestCase, tr, cases):
8
+ for inp, outp in cases:
9
+ self.assertEqual(outp, tr(inp))
10
+
11
+
12
+ class TestReduceToSingleSentence(unittest.TestCase):
13
+ def test_normal(self):
14
+ cases = [
15
+ ("this is a test", "this is a test"),
16
+ ("", ""),
17
+ (["this is one", "is two"], ["this is one is two"]),
18
+ (["one", "two", "three", "", "five six"], ["one two three five six"]),
19
+ ([""], []),
20
+ ]
21
+
22
+ _apply_test_on(self, ReduceToSingleSentence(), cases)
23
+
24
+ def test_delimiter(self):
25
+ cases = [
26
+ ("this_is_a_test", "this_is_a_test"),
27
+ ("", ""),
28
+ (["this_is_one", "is_two"], ["this_is_one_is_two"]),
29
+ (["one", "two", "three", "", "five_six"], ["one_two_three_five_six"]),
30
+ ([""], []),
31
+ ]
32
+
33
+ _apply_test_on(self, ReduceToSingleSentence("_"), cases)
34
+
35
+
36
+ class TestReduceToListOfListOfWords(unittest.TestCase):
37
+ def test_normal(self):
38
+ cases = [
39
+ ("this is a test", [["this", "is", "a", "test"]]),
40
+ ("", [[]]),
41
+ (["this is one", "is two"], [["this", "is", "one"], ["is", "two"]]),
42
+ (
43
+ ["one", "two", "three", "", "five six"],
44
+ [["one"], ["two"], ["three"], [], ["five", "six"]],
45
+ ),
46
+ ([], [[]]),
47
+ ([""], [[]]),
48
+ (["", "", ""], [[], [], []]),
49
+ ]
50
+
51
+ _apply_test_on(self, ReduceToListOfListOfWords(), cases)
52
+
53
+ def test_delimiter(self):
54
+ cases = [
55
+ ("this_is_a_test", [["this", "is", "a", "test"]]),
56
+ ("", [[]]),
57
+ (["this_is_one", "is_two"], [["this", "is", "one"], ["is", "two"]]),
58
+ (
59
+ ["one", "two", "three", "", "five_six"],
60
+ [["one"], ["two"], ["three"], [], ["five", "six"]],
61
+ ),
62
+ ([], [[]]),
63
+ ([""], [[]]),
64
+ (["", "", ""], [[], [], []]),
65
+ ]
66
+
67
+ _apply_test_on(self, ReduceToListOfListOfWords("_"), cases)
68
+
69
+
70
+ class TestReduceToListOfListOfChars(unittest.TestCase):
71
+ def test_normal(self):
72
+ cases = [
73
+ (
74
+ "this is a test",
75
+ [
76
+ [
77
+ "t",
78
+ "h",
79
+ "i",
80
+ "s",
81
+ " ",
82
+ "i",
83
+ "s",
84
+ " ",
85
+ "a",
86
+ " ",
87
+ "t",
88
+ "e",
89
+ "s",
90
+ "t",
91
+ ]
92
+ ],
93
+ ),
94
+ ("", [[]]),
95
+ (
96
+ ["this is one", "is two"],
97
+ [
98
+ ["t", "h", "i", "s", " ", "i", "s", " ", "o", "n", "e"],
99
+ ["i", "s", " ", "t", "w", "o"],
100
+ ],
101
+ ),
102
+ (
103
+ ["one", "two", "three", "", "five six"],
104
+ [
105
+ ["o", "n", "e"],
106
+ ["t", "w", "o"],
107
+ ["t", "h", "r", "e", "e"],
108
+ [],
109
+ ["f", "i", "v", "e", " ", "s", "i", "x"],
110
+ ],
111
+ ),
112
+ ([], [[]]),
113
+ ([""], [[]]),
114
+ (["", "", ""], [[], [], []]),
115
+ ]
116
+
117
+ _apply_test_on(self, ReduceToListOfListOfChars(), cases)
118
+
119
+ def test_delimiter(self):
120
+ cases = [
121
+ (
122
+ "this_is_a_test",
123
+ [
124
+ [
125
+ "t",
126
+ "h",
127
+ "i",
128
+ "s",
129
+ "_",
130
+ "i",
131
+ "s",
132
+ "_",
133
+ "a",
134
+ "_",
135
+ "t",
136
+ "e",
137
+ "s",
138
+ "t",
139
+ ]
140
+ ],
141
+ ),
142
+ ("", [[]]),
143
+ (
144
+ ["this_is_one", "is_two"],
145
+ [
146
+ ["t", "h", "i", "s", "_", "i", "s", "_", "o", "n", "e"],
147
+ ["i", "s", "_", "t", "w", "o"],
148
+ ],
149
+ ),
150
+ (
151
+ ["one", "two", "three", "", "five_six"],
152
+ [
153
+ ["o", "n", "e"],
154
+ ["t", "w", "o"],
155
+ ["t", "h", "r", "e", "e"],
156
+ [],
157
+ ["f", "i", "v", "e", "_", "s", "i", "x"],
158
+ ],
159
+ ),
160
+ ([], [[]]),
161
+ ([""], [[]]),
162
+ (["", "", ""], [[], [], []]),
163
+ ]
164
+
165
+ _apply_test_on(self, ReduceToListOfListOfChars(), cases)
166
+
167
+
168
+ class TestRemoveSpecificWords(unittest.TestCase):
169
+ def test_normal(self):
170
+ cases = [
171
+ (["yhe about that bug"], [" about that bug"]),
172
+ (["yeah about that bug"], [" about that bug"]),
173
+ (["one bug"], ["one bug"]),
174
+ (["yhe", "about", "bug"], [" ", "about", "bug"]),
175
+ (["yeah", "about", "bug"], [" ", "about", "bug"]),
176
+ (["one", "bug"], ["one", "bug"]),
177
+ (["yhe about bug"], [" about bug"]),
178
+ (["yeah about bug"], [" about bug"]),
179
+ (["about bug yhe"], ["about bug "]),
180
+ (["one bug"], ["one bug"]),
181
+ (["he asked a helpful question"], [" asked helpful question"]),
182
+ (["normal sentence"], ["normal sentence"]),
183
+ (["yhe awesome", " awesome"]),
184
+ (["the apple is not a pear", " apple is not pear"]),
185
+ (["yhe", " "]),
186
+ ]
187
+
188
+ _apply_test_on(
189
+ self, RemoveSpecificWords(["yhe", "yeah", "a", "he", "the"]), cases
190
+ )
191
+
192
+
193
+ class TestRemoveWhiteSpace(unittest.TestCase):
194
+ def test_normal(self):
195
+ cases = [
196
+ (["this is an example", "thisisanexample"]),
197
+ (["hello\tworld\n\r", "helloworld"]),
198
+ ]
199
+
200
+ _apply_test_on(self, RemoveWhiteSpace(), cases)
201
+
202
+ def test_replace_by_space(self):
203
+ cases = [
204
+ (["this is an example", "this is an example"]),
205
+ (["hello\tworld\n\r", "hello world "]),
206
+ ]
207
+
208
+ _apply_test_on(self, RemoveWhiteSpace(replace_by_space=True), cases)
209
+
210
+
211
+ class TestRemovePunctuation(unittest.TestCase):
212
+ def test_normal(self):
213
+ cases = [
214
+ (["this is an example!", "this is an example"]),
215
+ (["hello. goodbye", "hello goodbye"]),
216
+ (["this sentence has no punctuation", "this sentence has no punctuation"]),
217
+ ]
218
+
219
+ _apply_test_on(self, RemovePunctuation(), cases)
220
+
221
+ def test_non_ascii_punctuation(self):
222
+ cases = [
223
+ (["word༆’'", "word"]),
224
+ (["‘no’", "no"]),
225
+ (["“yes”", "yes"]),
226
+ ]
227
+
228
+ _apply_test_on(self, RemovePunctuation(), cases)
229
+
230
+
231
+ class TestRemoveMultipleSpaces(unittest.TestCase):
232
+ def test_normal(self):
233
+ cases = [
234
+ (["this is an example "], ["this is an example "]),
235
+ ([" hello goodbye "], [" hello goodbye "]),
236
+ ([" "], [" "]),
237
+ ]
238
+
239
+ _apply_test_on(self, RemoveMultipleSpaces(), cases)
240
+
242
+
243
+
244
+ class TestSubstituteWords(unittest.TestCase):
245
+ def test_normal(self):
246
+ cases = [
247
+ (["you're pretty"], ["i am awesome"]),
248
+ (["your book"], ["your book"]),
249
+ (["foobar"], ["foobar"]),
250
+ ]
251
+
252
+ _apply_test_on(
253
+ self,
254
+ SubstituteWords(
255
+ {"pretty": "awesome", "you": "i", "'re": " am", "foo": "bar"}
256
+ ),
257
+ cases,
258
+ )
259
+
260
+
261
+ class TestSubstituteRegexes(unittest.TestCase):
262
+ def test_normal(self):
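+ # the pattern r"\b(\w+)ed\b" -> r"\1" strips a trailing "ed" from whole
+ # words, so together with "doom" -> "sacr" the word "doomed" ends up as
+ # "sacr" and "loved" as "lov"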
263
+ cases = [
264
+ (["is the world doomed or loved?"], ["is the world sacr or lov?"]),
265
+ (["the sun is loved"], ["the sun is lov"]),
266
+ (["edibles are allegedly cultivated"], ["edibles are allegedly cultivat"]),
267
+ ]
268
+
269
+ _apply_test_on(
270
+ self,
271
+ SubstituteRegexes({r"doom": r"sacr", r"\b(\w+)ed\b": r"\1"}),
272
+ cases,
273
+ )
274
+
275
+
276
+ class TestStrip(unittest.TestCase):
277
+ def test_normal(self):
278
+ cases = [
279
+ ([" this is an example "], ["this is an example"]),
280
+ ([" hello goodbye "], ["hello goodbye"]),
281
+ ([" "], [""]),
282
+ ([" "], [""]),
283
+ ]
284
+
285
+ _apply_test_on(self, Strip(), cases)
286
+
287
+
288
+ class TestRemoveEmptyStrings(unittest.TestCase):
289
+ def test_normal(self):
290
+ cases = [
291
+ ([""], []),
292
+ (["this is an example"], ["this is an example"]),
293
+ ([" "], []),
294
+ ([" "], []),
295
+ ]
296
+
297
+ _apply_test_on(self, RemoveEmptyStrings(), cases)
298
+
299
+
300
+ class TestExpandCommonEnglishContractions(unittest.TestCase):
301
+ def test_normal(self):
302
+ cases = [
303
+ (
304
+ ["she'll make sure you can't make it"],
305
+ ["she will make sure you can not make it"],
306
+ ),
307
+ (["let's party!"], ["let us party!"]),
308
+ ]
309
+
310
+ _apply_test_on(self, ExpandCommonEnglishContractions(), cases)
311
+
312
+
313
+ class TestToLowerCase(unittest.TestCase):
314
+ def test_normal(self):
315
+ cases = [
316
+ (["You're PRETTY"], ["you're pretty"]),
317
+ ]
318
+
319
+ _apply_test_on(self, ToLowerCase(), cases)
320
+
321
+
322
+ class TestToUpperCase(unittest.TestCase):
323
+ def test_normal(self):
324
+ cases = [
325
+ (["You're amazing"], ["YOU'RE AMAZING"]),
326
+ ]
327
+
328
+ _apply_test_on(self, ToUpperCase(), cases)
329
+
330
+
331
+ class TestRemoveKaldiNonWords(unittest.TestCase):
332
+ def test_normal(self):
333
+ cases = [
334
+ (["you <unk> like [laugh]"], ["you like "]),
335
+ ]
336
+
337
+ _apply_test_on(self, RemoveKaldiNonWords(), cases)