Rolv-Arild committed
Commit 0614925 • 1 Parent(s): aa7bcfb
Training in progress, step 500
Browse files
- .gitattributes +1 -0
- .gitignore +1 -0
- added_tokens.json +1 -0
- config.json +107 -0
- preprocessor_config.json +9 -0
- pytorch_model.bin +3 -0
- run.sh +41 -0
- run_speech_recognition_ctc.py +773 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- training_args.bin +3 -0
- vocab.json +1 -0
- wandb/debug-internal.log +1 -0
- wandb/debug.log +1 -0
- wandb/latest-run +1 -0
- wandb/run-20220830_110431-yvlr8ud4/files/config.yaml +0 -0
- wandb/run-20220830_110431-yvlr8ud4/files/output.log +2653 -0
- wandb/run-20220830_110431-yvlr8ud4/files/requirements.txt +77 -0
- wandb/run-20220830_110431-yvlr8ud4/files/wandb-metadata.json +64 -0
- wandb/run-20220830_110431-yvlr8ud4/files/wandb-summary.json +0 -0
- wandb/run-20220830_110431-yvlr8ud4/logs/debug-internal.log +0 -0
- wandb/run-20220830_110431-yvlr8ud4/logs/debug.log +27 -0
- wandb/run-20220830_110431-yvlr8ud4/run-yvlr8ud4.wandb +3 -0
.gitattributes
CHANGED
@@ -29,3 +29,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.wandb filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1 @@
checkpoint-*/
added_tokens.json
ADDED
@@ -0,0 +1 @@
{"<s>": 39, "</s>": 40}
config.json
ADDED
@@ -0,0 +1,107 @@
{
  "_name_or_path": "facebook/wav2vec2-xls-r-1b",
  "activation_dropout": 0.055,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
  "add_adapter": false,
  "apply_spec_augment": true,
  "architectures": ["Wav2Vec2ForCTC"],
  "attention_dropout": 0.094,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
  "codevector_dim": 1024,
  "contrastive_logits_temperature": 0.1,
  "conv_bias": true,
  "conv_dim": [512, 512, 512, 512, 512, 512, 512],
  "conv_kernel": [10, 3, 3, 3, 3, 2, 2],
  "conv_stride": [5, 2, 2, 2, 2, 2, 2],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": true,
  "diversity_loss_weight": 0.1,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.04,
  "feat_quantizer_dropout": 0.0,
  "final_dropout": 0.0,
  "hidden_act": "gelu",
  "hidden_dropout": 0.047,
  "hidden_size": 1280,
  "initializer_range": 0.02,
  "intermediate_size": 5120,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.041,
  "mask_feature_length": 64,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.25,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
  "mask_time_prob": 0.082,
  "model_type": "wav2vec2",
  "num_adapter_layers": 3,
  "num_attention_heads": 16,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 48,
  "num_negatives": 100,
  "output_hidden_size": 1280,
  "pad_token_id": 38,
  "proj_codevector_dim": 1024,
  "tdnn_dilation": [1, 2, 3, 1, 1],
  "tdnn_dim": [512, 512, 512, 512, 1500],
  "tdnn_kernel": [5, 3, 3, 1, 1],
  "torch_dtype": "float32",
  "transformers_version": "4.18.0",
  "use_weighted_layer_sum": false,
  "vocab_size": 41,
  "xvector_output_dim": 512
}
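The config above pins the fine-tuning hyperparameters from run.sh (dropouts, SpecAugment masking, ctc_zero_infinity) onto the facebook/wav2vec2-xls-r-1b architecture and sets vocab_size to 41 to match the new CTC head. A minimal sketch of inspecting it with Transformers; the local path "./" is an assumption about where this repo is checked out:

# Sketch: load the committed config and check a few fine-tuning fields.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./")  # reads config.json from the checkout
print(config.model_type)                              # "wav2vec2"
print(config.vocab_size)                              # 41 = 39 vocab.json entries + the added <s>/</s> tokens
print(config.mask_time_prob, config.mask_feature_prob)  # 0.082, 0.25 (SpecAugment)
print(config.ctc_loss_reduction, config.ctc_zero_infinity)  # "mean", True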
preprocessor_config.json
ADDED
@@ -0,0 +1,9 @@
{
  "do_normalize": true,
  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0,
  "return_attention_mask": true,
  "sampling_rate": 16000
}
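This is the standard Wav2Vec2 feature-extractor configuration: mono 16 kHz input, right-padded, normalized, and returning an attention mask (needed for the layer-norm XLS-R variants). A hedged sketch of how it is typically used; the one-second silent waveform is made up purely for illustration:

# Sketch: load the committed preprocessor_config.json and featurize a dummy waveform.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./")
dummy_audio = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz (illustrative only)
inputs = feature_extractor(dummy_audio, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)    # torch.Size([1, 16000])
print(inputs.attention_mask.shape)  # torch.Size([1, 16000]), since return_attention_mask is true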
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a7668aeda50b6bccf6c723f8337046bbe416bd5e5112f197586ade2af090293
size 3850475057
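pytorch_model.bin is stored through Git LFS, so the repository only tracks this pointer (a SHA-256 object id plus a size of roughly 3.85 GB for the 1B-parameter checkpoint). A small sketch, assuming the real weights have already been pulled locally, of verifying a download against the pointer:

# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

expected_oid = "7a7668aeda50b6bccf6c723f8337046bbe416bd5e5112f197586ade2af090293"
expected_size = 3850475057

path = "pytorch_model.bin"  # assumes `git lfs pull` (or a hub download) already ran
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

assert os.path.getsize(path) == expected_size
assert sha256.hexdigest() == expected_oid
print("LFS pointer matches the downloaded weights")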
run.sh
ADDED
@@ -0,0 +1,41 @@
WANDB_ENTITY=NbAiLab WANDB_PROJECT=wav2vec2 python run_speech_recognition_ctc.py \
    --model_name_or_path="facebook/wav2vec2-xls-r-1b" \
    --hub_model_id="NbAiLab/wav2vec2-1b-nst" \
    --dataset_name="NbAiLab/NST" \
    --dataset_config="no-close" \
    --output_dir="./" \
    --overwrite_output_dir \
    --num_train_epochs="40" \
    --per_device_train_batch_size="12" \
    --per_device_eval_batch_size="12" \
    --gradient_accumulation_steps="2" \
    --learning_rate="2e-5" \
    --warmup_steps="2000" \
    --length_column_name="input_length" \
    --evaluation_strategy="steps" \
    --text_column_name="text" \
    --save_steps="500" \
    --eval_steps="500" \
    --logging_steps="100" \
    --layerdrop="0.041" \
    --attention_dropout="0.094" \
    --activation_dropout="0.055" \
    --hidden_dropout="0.047" \
    --save_total_limit="3" \
    --freeze_feature_encoder \
    --feat_proj_dropout="0.04" \
    --mask_time_prob="0.082" \
    --mask_time_length="10" \
    --mask_feature_prob="0.25" \
    --mask_feature_length="64" \
    --gradient_checkpointing \
    --min_duration_in_seconds="0.5" \
    --max_duration_in_seconds="30.0" \
    --use_auth_token \
    --seed="42" \
    --fp16 \
    --group_by_length \
    --do_train --do_eval \
    --push_to_hub \
    --preprocessing_num_workers="32" \
    --ctc_zero_infinity
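The launch script fine-tunes facebook/wav2vec2-xls-r-1b on the "no-close" config of NbAiLab/NST with an effective batch of 12 x 2 gradient-accumulation steps = 24 per device, evaluating and checkpointing every 500 steps (which is what produced this step-500 commit). HfArgumentParser turns these flags into a TrainingArguments object; roughly the Trainer-related subset in Python, as a sketch rather than the exact object serialized in training_args.bin:

# Rough Python equivalent of the Trainer-related flags in run.sh (illustrative sketch).
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./",
    overwrite_output_dir=True,
    num_train_epochs=40,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=12,
    gradient_accumulation_steps=2,   # effective train batch of 24 per device
    learning_rate=2e-5,
    warmup_steps=2000,
    evaluation_strategy="steps",
    save_steps=500,                  # hence the checkpoint at step 500 in this commit
    eval_steps=500,
    logging_steps=100,
    save_total_limit=3,
    gradient_checkpointing=True,
    fp16=True,
    group_by_length=True,
    length_column_name="input_length",
    seed=42,
    push_to_hub=True,
)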
run_speech_recognition_ctc.py
ADDED
@@ -0,0 +1,773 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""

import functools
import json
import logging
import os
import re
import sys
import warnings
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union

import datasets
import numpy as np
import torch
from datasets import DatasetDict, load_dataset, load_metric

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForCTC,
    AutoProcessor,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Processor,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.16.0.dev0")

require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")


logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    tokenizer_name_or_path: Optional[str] = field(
        default=None,
        metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: float = field(
        default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
    hidden_dropout: float = field(
        default=0.0,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    final_dropout: float = field(
        default=0.0,
        metadata={"help": "The dropout probability for the final projection layer."},
    )
    mask_time_prob: float = field(
        default=0.05,
        metadata={
            "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
            "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
            "vectors will be masked along the time axis."
        },
    )
    mask_time_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the time axis."},
    )
    mask_feature_prob: float = field(
        default=0.0,
        metadata={
            "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
            "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
        },
    )
    mask_feature_length: int = field(
        default=10,
        metadata={"help": "Length of vector span to mask along the feature axis."},
    )
    layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
    ctc_loss_reduction: Optional[str] = field(
        default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
    )
    ctc_zero_infinity: Optional[bool] = field(
        default=False, metadata={"help": "If True, will try to avoid the CTC loss going to infinity."}
    )

@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    dataset_name: str = field(
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: str = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="test",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    text_column_name: str = field(
        default="text",
        metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
            "value if set."
        },
    )
    chars_to_ignore: Optional[List[str]] = list_field(
        default=None,
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
    eval_metrics: List[str] = list_field(
        default=["wer"],
        metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
    )
    max_duration_in_seconds: float = field(
        default=20.0,
        metadata={
            "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
        },
    )
    min_duration_in_seconds: float = field(
        default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
    )
    preprocessing_only: bool = field(
        default=False,
        metadata={
            "help": "Whether to only do data preprocessing and skip training. "
            "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
            "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
            "so that the cached datasets can consequently be loaded in distributed training"
        },
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "If :obj:`True`, will use the token generated when running"
            ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
        },
    )
    unk_token: str = field(
        default="[UNK]",
        metadata={"help": "The unk token for the tokenizer"},
    )
    pad_token: str = field(
        default="[PAD]",
        metadata={"help": "The padding token for the tokenizer"},
    )
    word_delimiter_token: str = field(
        default="|",
        metadata={"help": "The word delimiter token for the tokenizer"},
    )
    phoneme_language: Optional[str] = field(
        default=None,
        metadata={
            "help": "The target language that should be used be"
            " passed to the tokenizer for tokenization. Note that"
            " this is only relevant if the model classifies the"
            " input audio to a sequence of phoneme sequences."
        },
    )


@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.AutoProcessor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    processor: AutoProcessor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch


def create_vocabulary_from_data(
    datasets: DatasetDict,
    word_delimiter_token: Optional[str] = None,
    unk_token: Optional[str] = None,
    pad_token: Optional[str] = None,
):
    # Given training and test labels create vocabulary
    def extract_all_chars(batch):
        all_text = " ".join(batch["target_text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocabs = datasets.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=datasets["train"].column_names,
    )

    # take union of all unique characters in each dataset
    vocab_set = functools.reduce(
        lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
    )

    vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}

    # replace white space with delimiter token
    if word_delimiter_token is not None:
        vocab_dict[word_delimiter_token] = vocab_dict[" "]
        del vocab_dict[" "]

    # add unk and pad token
    if unk_token is not None:
        vocab_dict[unk_token] = len(vocab_dict)

    if pad_token is not None:
        vocab_dict[pad_token] = len(vocab_dict)

    return vocab_dict


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Pre-processing dataset
    import re

    def map_dataset(entry):
        text = entry["text"].lower()
        text = text.replace("(...Vær stille under dette opptaket...)", "")
        text = re.sub('[áàâ]', 'a', text)
        text = re.sub('[ä]', 'æ', text)
        text = re.sub('[éèëê]', 'e', text)
        text = re.sub('[íìïî]', 'i', text)
        text = re.sub('[óòöô]', 'o', text)
        text = re.sub('[ö]', 'ø', text)
        text = re.sub('[ç]', 'c', text)
        text = re.sub('[úùüû]', 'u', text)
        # text = re.sub('\\(?=(Punktum|Komma|Utropstegn|Spørsmålstegn))', ' ', text)
        text = re.sub('\s+', ' ', text)
        return {"text": text}


    def filter_dataset(entry):
        if not (len(entry["text"]) <= len(entry["audio"]["array"]) // 320) and (len(entry["text"].strip()) >= 3):
            return False  # Too short
        if re.match(entry["type"], "pIW|CA"):
            return False  # Spelling out words
        return True

    # 1. First, let's load the dataset
    raw_datasets = DatasetDict()

    if training_args.do_train:
        raw_datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=data_args.train_split_name,
            use_auth_token=data_args.use_auth_token,
        ).shuffle()
        raw_datasets["train"] = raw_datasets["train"].filter(filter_dataset)
        raw_datasets["train"] = raw_datasets["train"].map(map_dataset)

        if data_args.audio_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
                "Make sure to set `--audio_column_name` to the correct audio column - one of "
                f"{', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.text_column_name not in raw_datasets["train"].column_names:
            raise ValueError(
                f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
                "Make sure to set `--text_column_name` to the correct text column - one of "
                f"{', '.join(raw_datasets['train'].column_names)}."
            )

        if data_args.max_train_samples is not None:
            raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))

    if training_args.do_eval:
        raw_datasets["eval"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=data_args.eval_split_name,
            use_auth_token=data_args.use_auth_token,
        ).shuffle()
        raw_datasets["eval"] = raw_datasets["eval"].filter(filter_dataset)
        raw_datasets["eval"] = raw_datasets["eval"].map(map_dataset)

        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))


    # 2. We remove some special characters from the datasets
    # that make training complicated and do not help in transcribing the speech
    # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
    # that could be easily picked up by the model
    #chars_to_ignore_regex = (
    #    f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
    #)
    chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\”\�\'\–\_\\\+\#\/]'

    text_column_name = data_args.text_column_name

    def remove_special_characters(batch):
        if chars_to_ignore_regex is not None:
            batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
        else:
            batch["target_text"] = batch[text_column_name].lower() + " "
        return batch

    with training_args.main_process_first(desc="dataset map special characters removal"):
        raw_datasets = raw_datasets.map(
            remove_special_characters,
            remove_columns=[text_column_name],
            desc="remove special characters from datasets",
        )

    # save special tokens for tokenizer
    word_delimiter_token = data_args.word_delimiter_token
    unk_token = data_args.unk_token
    pad_token = data_args.pad_token

    # 3. Next, let's load the config as we might need it to create
    # the tokenizer
    # load config
    config = AutoConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # 4. Next, if no tokenizer file is defined,
    # we create the vocabulary of the model by extracting all unique characters from
    # the training and evaluation datasets
    # We need to make sure that only first rank saves vocabulary
    # make sure all processes wait until vocab is created
    tokenizer_name_or_path = model_args.tokenizer_name_or_path
    tokenizer_kwargs = {}
    if tokenizer_name_or_path is None:
        # save vocab in training output dir
        tokenizer_name_or_path = training_args.output_dir

        vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")

        with training_args.main_process_first():
            if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
                os.remove(vocab_file)

        with training_args.main_process_first(desc="dataset map vocabulary creation"):
            if not os.path.isfile(vocab_file):
                os.makedirs(tokenizer_name_or_path, exist_ok=True)
                vocab_dict = create_vocabulary_from_data(
                    raw_datasets,
                    word_delimiter_token=word_delimiter_token,
                    unk_token=unk_token,
                    pad_token=pad_token,
                )

                # save vocab dict to be loaded into tokenizer
                with open(vocab_file, "w") as file:
                    json.dump(vocab_dict, file)

        # if tokenizer has just been created
        # it is defined by `tokenizer_class` if present in config else by `model_type`
        tokenizer_kwargs = {
            "config": config if config.tokenizer_class is not None else None,
            "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
            "unk_token": unk_token,
            "pad_token": pad_token,
            "word_delimiter_token": word_delimiter_token,
        }

    # 5. Now we can instantiate the feature extractor, tokenizer and model
    # Note for distributed training, the .from_pretrained methods guarantee that only
    # one local process can concurrently download model & vocab.

    # load feature_extractor and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_name_or_path,
        use_auth_token=data_args.use_auth_token,
        **tokenizer_kwargs,
    )
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
    )

    # adapt config
    config.update(
        {
            "feat_proj_dropout": model_args.feat_proj_dropout,
            "attention_dropout": model_args.attention_dropout,
            "hidden_dropout": model_args.hidden_dropout,
            "final_dropout": model_args.final_dropout,
            "mask_time_prob": model_args.mask_time_prob,
            "mask_time_length": model_args.mask_time_length,
            "mask_feature_prob": model_args.mask_feature_prob,
            "mask_feature_length": model_args.mask_feature_length,
            "gradient_checkpointing": training_args.gradient_checkpointing,
            "layerdrop": model_args.layerdrop,
            "ctc_loss_reduction": model_args.ctc_loss_reduction,
            "ctc_zero_infinity": model_args.ctc_zero_infinity,
            "pad_token_id": tokenizer.pad_token_id,
            "vocab_size": len(tokenizer),
            "activation_dropout": model_args.activation_dropout,
        }
    )

    # create model
    model = AutoModelForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        config=config,
        use_auth_token=data_args.use_auth_token,
    )

    # freeze encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
    # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
    # so that we just need to set the correct target sampling rate and normalize the input
    # via the `feature_extractor`

    # make sure that dataset decodes audio with correct sampling rate
    dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
    if dataset_sampling_rate != feature_extractor.sampling_rate:
        raw_datasets = raw_datasets.cast_column(
            data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
        )

    # derive max & min input length for sample rate & max duration
    max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
    min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
    audio_column_name = data_args.audio_column_name
    num_workers = data_args.preprocessing_num_workers

    # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
    phoneme_language = data_args.phoneme_language

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def prepare_dataset(batch):
        # load audio
        sample = batch[audio_column_name]

        inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
        batch["input_values"] = inputs.input_values[0]
        batch["input_length"] = len(batch["input_values"])

        # encode targets
        additional_kwargs = {}
        if phoneme_language is not None:
            additional_kwargs["phonemizer_lang"] = phoneme_language

        batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
        return batch

    with training_args.main_process_first(desc="dataset map preprocessing"):
        vectorized_datasets = raw_datasets.map(
            prepare_dataset,
            remove_columns=next(iter(raw_datasets.values())).column_names,
            num_proc=num_workers,
            desc="preprocess datasets",
        )

    def is_audio_in_length_range(length):
        return length > min_input_length and length < max_input_length

    # filter data that is shorter than min_input_length
    vectorized_datasets = vectorized_datasets.filter(
        is_audio_in_length_range,
        num_proc=num_workers,
        input_columns=["input_length"],
    )

    # 7. Next, we can prepare the training.
    # Let's use word error rate (WER) as our evaluation metric,
    # instantiate a data collator and the trainer

    # Define evaluation metrics during training, *i.e.* word error rate, character error rate
    eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}

    # for large datasets it is advised to run the preprocessing on a
    # single machine first with ``args.preprocessing_only`` since there will mostly likely
    # be a timeout when running the script in distributed mode.
    # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
    # cached dataset
    if data_args.preprocessing_only:
        logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
        return

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id

        pred_str = tokenizer.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)

        metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}

        return metrics

    # Now save everything to be able to create a single processor later
    if is_main_process(training_args.local_rank):
        # save feature extractor, tokenizer and config
        feature_extractor.save_pretrained(training_args.output_dir)
        tokenizer.save_pretrained(training_args.output_dir)
        config.save_pretrained(training_args.output_dir)

    try:
        processor = AutoProcessor.from_pretrained(training_args.output_dir)
    except (OSError, KeyError):
        warnings.warn(
            "Loading a processor from a feature extractor config that does not"
            " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
            " attribute to your `preprocessor_config.json` file to suppress this warning: "
            " `'processor_class': 'Wav2Vec2Processor'`",
            FutureWarning,
        )
        processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)

    # Instantiate custom data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor)

    # Initialize Trainer
    trainer = Trainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
        eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
        tokenizer=feature_extractor,
    )

    # 8. Finally, we can start training

    # Training
    if training_args.do_train:

        # use last checkpoint if exist
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples
            if data_args.max_train_samples is not None
            else len(vectorized_datasets["train"])
        )
        metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = (
            data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
        )
        metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "speech-recognition",
        "tags": ["automatic-speech-recognition", data_args.dataset_name],
        "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
        "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
    }
    if "common_voice" in data_args.dataset_name:
        kwargs["language"] = config_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)

    return results


if __name__ == "__main__":
    main()
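Once the feature extractor, tokenizer, and weights from this commit sit together in one directory, the checkpoint can be exercised with greedy CTC decoding using the same classes the script imports. A minimal inference sketch; the local path "./" and the silent placeholder waveform are assumptions for illustration:

# Minimal greedy-decoding sketch for the checkpoint in this repo (illustrative).
import numpy as np
import torch
from transformers import AutoModelForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./")   # preprocessor_config.json + tokenizer files
model = AutoModelForCTC.from_pretrained("./").eval()  # config.json + pytorch_model.bin

speech = np.zeros(16000, dtype=np.float32)  # placeholder: one second of 16 kHz audio
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))  # CTC-collapsed transcription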
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e9b58429fd47b7355babb68eb4ecb995c4e4472cb1b280d0d7290761bca41cc4
size 3055
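training_args.bin is the TrainingArguments object the Trainer pickles alongside each checkpoint (also LFS-tracked, though only about 3 KB). A sketch, assuming transformers is importable so the pickle can be resolved, of peeking at it:

# Sketch: inspect the serialized TrainingArguments saved by the Trainer.
import torch

training_args = torch.load("training_args.bin")
print(type(training_args).__name__)  # TrainingArguments
print(training_args.learning_rate)   # 2e-05, matching run.sh
print(training_args.save_steps)      # 500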
vocab.json
ADDED
@@ -0,0 +1 @@
{"(": 1, ")": 2, "0": 3, "3": 4, "7": 5, "8": 6, "9": 7, "a": 8, "b": 9, "c": 10, "d": 11, "e": 12, "f": 13, "g": 14, "h": 15, "i": 16, "j": 17, "k": 18, "l": 19, "m": 20, "n": 21, "o": 22, "p": 23, "q": 24, "r": 25, "s": 26, "t": 27, "u": 28, "v": 29, "w": 30, "x": 31, "y": 32, "z": 33, "å": 34, "æ": 35, "ø": 36, "|": 0, "[UNK]": 37, "[PAD]": 38}
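The vocabulary has 39 entries (indices 0 to 38): the word delimiter "|" at 0, a few digits and parentheses that survive the text cleaning, the lowercase letters a-z plus å, æ, ø, and [UNK]/[PAD]; together with the <s>/</s> entries from added_tokens.json this yields the vocab_size of 41 in config.json. A sketch of rebuilding the CTC tokenizer directly from this file:

# Sketch: build the Wav2Vec2 CTC tokenizer from the committed vocab.json.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "vocab.json",
    unk_token="[UNK]",
    pad_token="[PAD]",
    word_delimiter_token="|",
)
print(tokenizer.vocab_size)                    # 39, before the added <s>/</s> tokens
ids = tokenizer("god dag").input_ids           # character ids, "|" standing in for the space
print(tokenizer.decode(ids))                   # "god dag"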
wandb/debug-internal.log
ADDED
@@ -0,0 +1 @@
run-20220830_110431-yvlr8ud4/logs/debug-internal.log
wandb/debug.log
ADDED
@@ -0,0 +1 @@
run-20220830_110431-yvlr8ud4/logs/debug.log
wandb/latest-run
ADDED
@@ -0,0 +1 @@
run-20220830_110431-yvlr8ud4
wandb/run-20220830_110431-yvlr8ud4/files/config.yaml
ADDED
The diff for this file is too large to render. See raw diff.
wandb/run-20220830_110431-yvlr8ud4/files/output.log
ADDED
@@ -0,0 +1,2653 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
0%| | 100/483200 [03:34<128:24:34, 1.05it/s]
0%| | 198/483200 [07:07<141:52:25, 1.06s/it]
0%|▏ | 299/483200 [10:44<136:25:48, 1.02s/it]
0%|▏ | 398/483200 [14:22<142:07:15, 1.06s/it]
0%|▏ | 499/483200 [18:00<139:01:28, 1.04s/it]
0%|▏ | 500/483200 [18:01<131:51:34, 1.02it/s]
The following columns in the evaluation set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length. If input_length are not expected by `Wav2Vec2ForCTC.forward`, you can safely ignore this message.
***** Running Evaluation *****
  Num examples = 75595
  Batch size = 12
Configuration saved in ./checkpoint-500/config.json
{'eval_loss': 3.416260242462158, 'eval_wer': 0.9938169325893345, 'eval_runtime': 4566.4565, 'eval_samples_per_second': 16.554, 'eval_steps_per_second': 1.38, 'epoch': 0.04}
Model weights saved in ./checkpoint-500/pytorch_model.bin
Feature extractor saved in ./checkpoint-500/preprocessor_config.json
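The evaluation block above reports eval_wer ≈ 0.994 over 75,595 examples, and the throughput figures are self-consistent (75595 / 4566.46 s ≈ 16.55 samples/s). As a point of reference, here is a minimal sketch of how a word error rate like this can be computed from decoded transcriptions and reference texts with the jiwer package pinned in the requirements below; the two example sentence lists are invented for illustration and are not data from this run.

# Minimal sketch: word error rate over decoded predictions vs. reference texts,
# using jiwer (pinned as jiwer==2.3.0 in this run's requirements.txt).
# The two lists below are invented examples, not data from this run.
import jiwer

references = ["dette er en test", "god morgen norge"]
predictions = ["dette er en tast", "god morgen"]

# WER = (substitutions + deletions + insertions) / number of reference words
wer = jiwer.wer(references, predictions)
print(f"WER: {wer:.4f}")  # 1 substitution + 1 deletion over 7 reference words -> ~0.2857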
wandb/run-20220830_110431-yvlr8ud4/files/requirements.txt
ADDED
@@ -0,0 +1,77 @@
aiohttp==3.8.1
aiosignal==1.2.0
appdirs==1.4.4
async-timeout==4.0.2
attrs==21.4.0
audioread==2.1.9
certifi==2021.10.8
cffi==1.15.0
charset-normalizer==2.0.12
click==8.1.2
datasets==2.1.0
decorator==5.1.1
dill==0.3.4
docker-pycreds==0.4.0
filelock==3.6.0
frozenlist==1.3.0
fsspec==2022.3.0
gitdb==4.0.9
gitpython==3.1.27
huggingface-hub==0.5.1
hypothesis==6.46.5
idna==3.3
jiwer==2.3.0
joblib==1.1.0
kenlm==0.0.0
librosa==0.9.1
llvmlite==0.38.0
multidict==6.0.2
multiprocess==0.70.12.2
numba==0.55.1
numpy==1.21.6
packaging==21.3
pandas==1.4.2
pathtools==0.1.2
pillow==9.1.0
pip==20.3.4
pkg-resources==0.0.0
pooch==1.6.0
promise==2.3
protobuf==3.20.1
psutil==5.9.0
pyarrow==7.0.0
pycparser==2.21
pyctcdecode==0.3.0
pygtrie==2.4.2
pyparsing==3.0.8
python-dateutil==2.8.2
python-levenshtein==0.12.2
pytz==2022.1
pyyaml==6.0
regex==2022.4.24
requests==2.27.1
resampy==0.2.2
responses==0.18.0
sacremoses==0.0.49
scikit-learn==1.0.2
scipy==1.8.0
sentry-sdk==1.5.10
setproctitle==1.2.3
setuptools==44.1.1
shortuuid==1.0.8
six==1.16.0
smmap==5.0.0
sortedcontainers==2.4.0
soundfile==0.10.3.post1
threadpoolctl==3.1.0
tokenizers==0.12.1
torch==1.11.0+cu113
torchaudio==0.11.0+cu113
torchvision==0.12.0+cu113
tqdm==4.64.0
transformers==4.18.0
typing-extensions==4.2.0
urllib3==1.26.9
wandb==0.12.15
xxhash==3.0.0
yarl==1.7.2
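Since the list above pins exact versions, one quick way to confirm that a local environment matches is to compare a few of the critical packages programmatically; a minimal sketch using importlib.metadata follows (the package names come from the list above, but the selection is arbitrary and the check itself is an illustration, not part of this run).

# Minimal sketch: compare a few installed package versions against the pinned ones above.
from importlib.metadata import version

pinned = {"transformers": "4.18.0", "datasets": "2.1.0", "torch": "1.11.0+cu113"}
for pkg, want in pinned.items():
    have = version(pkg)  # raises PackageNotFoundError if the package is missing
    status = "OK" if have == want else "MISMATCH"
    print(f"{pkg}: pinned {want}, installed {have} -> {status}")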
wandb/run-20220830_110431-yvlr8ud4/files/wandb-metadata.json
ADDED
@@ -0,0 +1,64 @@
{
    "os": "Linux-5.13.0-40-generic-x86_64-with-glibc2.34",
    "python": "3.9.7",
    "heartbeatAt": "2022-08-30T09:04:32.564832",
    "startedAt": "2022-08-30T09:04:31.465309",
    "docker": null,
    "cpu_count": 96,
    "cuda": null,
    "args": [
        "--model_name_or_path=facebook/wav2vec2-xls-r-1b",
        "--hub_model_id=NbAiLab/wav2vec2-1b-nst",
        "--dataset_name=NbAiLab/NST",
        "--dataset_config=no-close",
        "--output_dir=./",
        "--overwrite_output_dir",
        "--num_train_epochs=40",
        "--per_device_train_batch_size=12",
        "--per_device_eval_batch_size=12",
        "--gradient_accumulation_steps=2",
        "--learning_rate=2e-5",
        "--warmup_steps=2000",
        "--length_column_name=input_length",
        "--evaluation_strategy=steps",
        "--text_column_name=text",
        "--save_steps=500",
        "--eval_steps=500",
        "--logging_steps=100",
        "--layerdrop=0.041",
        "--attention_dropout=0.094",
        "--activation_dropout=0.055",
        "--hidden_dropout=0.047",
        "--save_total_limit=3",
        "--freeze_feature_encoder",
        "--feat_proj_dropout=0.04",
        "--mask_time_prob=0.082",
        "--mask_time_length=10",
        "--mask_feature_prob=0.25",
        "--mask_feature_length=64",
        "--gradient_checkpointing",
        "--min_duration_in_seconds=0.5",
        "--max_duration_in_seconds=30.0",
        "--use_auth_token",
        "--seed=42",
        "--fp16",
        "--group_by_length",
        "--do_train",
        "--do_eval",
        "--push_to_hub",
        "--preprocessing_num_workers=32",
        "--ctc_zero_infinity"
    ],
    "state": "running",
    "program": "/mnt/lv_ai_1_dante/ml/models/wav2vec2-1b-nst/run_speech_recognition_ctc.py",
    "codePath": "run_speech_recognition_ctc.py",
    "git": {
        "remote": "https://huggingface.co/NbAiLab/wav2vec2-1b-nst",
        "commit": "aa7bcfb7473f662ac8f42c246a56419e5900d9c6"
    },
    "email": "[email protected]",
    "root": "/mnt/lv_ai_1_dante/ml/models/wav2vec2-1b-nst",
    "host": "dante",
    "username": "rolvb",
    "executable": "/mnt/lv_ai_1_dante/ml/rolvb/venv/bin/python"
}
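The recorded arguments above also pin down the training schedule seen in the output.log progress bars (483200 optimizer steps in total). Below is a rough consistency check, assuming the single-GPU setup recorded in debug.log further down ('_n_gpu': 1); the training-set size is inferred from the step count rather than read from the logs.

# Back-of-envelope check of the schedule implied by the recorded arguments.
# Assumes one GPU (debug.log records '_n_gpu': 1); the training-set size is an
# inference from the total step count, not a value taken from the logs.
per_device_train_batch_size = 12
gradient_accumulation_steps = 2
num_train_epochs = 40
total_optimizer_steps = 483_200  # total shown by the tqdm progress bar in output.log

effective_batch = per_device_train_batch_size * gradient_accumulation_steps   # 24 examples per optimizer step
steps_per_epoch = total_optimizer_steps // num_train_epochs                   # 12080
approx_train_examples = steps_per_epoch * effective_batch                     # ~289,920 examples per epoch
print(effective_batch, steps_per_epoch, approx_train_examples)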
wandb/run-20220830_110431-yvlr8ud4/files/wandb-summary.json
ADDED
The diff for this file is too large to render.
wandb/run-20220830_110431-yvlr8ud4/logs/debug-internal.log
ADDED
The diff for this file is too large to render.
wandb/run-20220830_110431-yvlr8ud4/logs/debug.log
ADDED
@@ -0,0 +1,27 @@
2022-08-30 11:04:31,468 INFO MainThread:3759610 [wandb_setup.py:_flush():75] Loading settings from /home/rolvb/.config/wandb/settings
2022-08-30 11:04:31,468 INFO MainThread:3759610 [wandb_setup.py:_flush():75] Loading settings from /mnt/lv_ai_1_dante/ml/models/wav2vec2-1b-nst/wandb/settings
2022-08-30 11:04:31,468 INFO MainThread:3759610 [wandb_setup.py:_flush():75] Loading settings from environment variables: {'project': 'wav2vec2', 'entity': 'NbAiLab'}
2022-08-30 11:04:31,468 INFO MainThread:3759610 [wandb_setup.py:_flush():75] Inferring run settings from compute environment: {'program_relpath': 'run_speech_recognition_ctc.py', 'program': '/mnt/lv_ai_1_dante/ml/models/wav2vec2-1b-nst/run_speech_recognition_ctc.py'}
2022-08-30 11:04:31,468 INFO MainThread:3759610 [wandb_init.py:_log_setup():437] Logging user logs to /mnt/lv_ai_1_dante/ml/models/wav2vec2-1b-nst/wandb/run-20220830_110431-yvlr8ud4/logs/debug.log
2022-08-30 11:04:31,468 INFO MainThread:3759610 [wandb_init.py:_log_setup():438] Logging internal logs to /mnt/lv_ai_1_dante/ml/models/wav2vec2-1b-nst/wandb/run-20220830_110431-yvlr8ud4/logs/debug-internal.log
2022-08-30 11:04:31,469 INFO MainThread:3759610 [wandb_init.py:init():471] calling init triggers
2022-08-30 11:04:31,469 INFO MainThread:3759610 [wandb_init.py:init():474] wandb.init called with sweep_config: {}
config: {}
2022-08-30 11:04:31,469 INFO MainThread:3759610 [wandb_init.py:init():524] starting backend
2022-08-30 11:04:31,469 INFO MainThread:3759610 [backend.py:_multiprocessing_setup():97] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
2022-08-30 11:04:31,559 INFO MainThread:3759610 [backend.py:ensure_launched():217] starting backend process...
2022-08-30 11:04:31,631 INFO MainThread:3759610 [backend.py:ensure_launched():222] started backend process with pid: 3760704
2022-08-30 11:04:31,633 INFO MainThread:3759610 [wandb_init.py:init():533] backend started and connected
2022-08-30 11:04:31,642 INFO MainThread:3759610 [wandb_init.py:init():597] updated telemetry
2022-08-30 11:04:31,820 INFO MainThread:3759610 [wandb_init.py:init():628] communicating run to backend with 30 second timeout
2022-08-30 11:04:32,385 INFO MainThread:3759610 [wandb_run.py:_on_init():1923] communicating current version
2022-08-30 11:04:32,542 INFO MainThread:3759610 [wandb_run.py:_on_init():1927] got version response upgrade_message: "wandb version 0.13.2 is available! To upgrade, please run:\n $ pip install wandb --upgrade"

2022-08-30 11:04:32,542 INFO MainThread:3759610 [wandb_init.py:init():659] starting run threads in backend
2022-08-30 11:04:32,598 INFO MainThread:3759610 [wandb_run.py:_console_start():1897] atexit reg
2022-08-30 11:04:32,599 INFO MainThread:3759610 [wandb_run.py:_redirect():1770] redirect: SettingsConsole.REDIRECT
2022-08-30 11:04:32,600 INFO MainThread:3759610 [wandb_run.py:_redirect():1775] Redirecting console.
2022-08-30 11:04:32,602 INFO MainThread:3759610 [wandb_run.py:_redirect():1831] Redirects installed.
2022-08-30 11:04:32,602 INFO MainThread:3759610 [wandb_init.py:init():684] run started, returning control to user process
2022-08-30 11:04:32,630 INFO MainThread:3759610 [wandb_run.py:_config_callback():1131] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 38, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-xls-r-1b', 'transformers_version': '4.18.0', 'feat_extract_dropout': 0.0, 'model_type': 'wav2vec2', 'num_feat_extract_layers': 7, 'hidden_size': 1280, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 48, 'intermediate_size': 5120, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.047, 'attention_dropout': 0.094, 'activation_dropout': 0.055, 'feat_proj_dropout': 0.04, 'final_dropout': 0.0, 'layerdrop': 0.041, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 41, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.082, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.25, 'mask_feature_length': 64, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 1024, 'proj_codevector_dim': 1024, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'mean', 'ctc_zero_infinity': True, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1280, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 12, 'per_device_eval_batch_size': 12, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 2, 'eval_accumulation_steps': 'None', 'eval_delay': 0, 'learning_rate': 2e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 
'num_train_epochs': 40.0, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 0.0, 'warmup_steps': 2000, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Aug30_11-03-31_dante', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 100, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_on_each_node': False, 'no_cuda': False, 'seed': 42, 'data_seed': 'None', 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': False, 'metric_for_best_model': 'None', 'greater_is_better': 'None', 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'NbAiLab/wav2vec2-1b-nst', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'train_batch_size': 12, 'eval_batch_size': 12}
2022-08-30 11:04:32,633 INFO MainThread:3759610 [wandb_watch.py:watch():47] Watching
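The settings-loading lines above show the run picking up its project and entity ({'project': 'wav2vec2', 'entity': 'NbAiLab'}) from environment variables. Below is a minimal sketch of setting that environment before launching the training script; WANDB_PROJECT and WANDB_ENTITY are standard wandb variables, and using them this way is an assumption here, since the actual launch command is not part of this log.

# Minimal sketch: reproduce the wandb settings this run loaded from its environment.
# WANDB_PROJECT / WANDB_ENTITY are standard wandb environment variables; setting them
# before the training script starts is one way (assumed here) to get the same routing.
import os

os.environ["WANDB_PROJECT"] = "wav2vec2"
os.environ["WANDB_ENTITY"] = "NbAiLab"
# The training script would then be launched as usual, e.g.:
#   python run_speech_recognition_ctc.py --model_name_or_path=facebook/wav2vec2-xls-r-1b ...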
wandb/run-20220830_110431-yvlr8ud4/run-yvlr8ud4.wandb
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:259022a5c62fb30a5d197a39fe99951a7c9f97219d04c793dce9fcceb827a9c8
size 8649498