mikr committed
Commit 40f1bc6
1 Parent(s): b61a544

Training in progress, step 1000

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
added_tokens.json ADDED
@@ -0,0 +1,109 @@
+ {
+ "<|af|>": 50327,
+ "<|am|>": 50334,
+ "<|ar|>": 50272,
+ "<|as|>": 50350,
+ "<|az|>": 50304,
+ "<|ba|>": 50355,
+ "<|be|>": 50330,
+ "<|bg|>": 50292,
+ "<|bn|>": 50302,
+ "<|bo|>": 50347,
+ "<|br|>": 50309,
+ "<|bs|>": 50315,
+ "<|ca|>": 50270,
+ "<|cs|>": 50283,
+ "<|cy|>": 50297,
+ "<|da|>": 50285,
+ "<|de|>": 50261,
+ "<|el|>": 50281,
+ "<|endoftext|>": 50257,
+ "<|en|>": 50259,
+ "<|es|>": 50262,
+ "<|et|>": 50307,
+ "<|eu|>": 50310,
+ "<|fa|>": 50300,
+ "<|fi|>": 50277,
+ "<|fo|>": 50338,
+ "<|fr|>": 50265,
+ "<|gl|>": 50319,
+ "<|gu|>": 50333,
+ "<|haw|>": 50352,
+ "<|ha|>": 50354,
+ "<|hi|>": 50276,
+ "<|hr|>": 50291,
+ "<|ht|>": 50339,
+ "<|hu|>": 50286,
+ "<|hy|>": 50312,
+ "<|id|>": 50275,
+ "<|is|>": 50311,
+ "<|it|>": 50274,
+ "<|iw|>": 50279,
+ "<|ja|>": 50266,
+ "<|jw|>": 50356,
+ "<|ka|>": 50329,
+ "<|kk|>": 50316,
+ "<|km|>": 50323,
+ "<|kn|>": 50306,
+ "<|ko|>": 50264,
+ "<|la|>": 50294,
+ "<|lb|>": 50345,
+ "<|ln|>": 50353,
+ "<|lo|>": 50336,
+ "<|lt|>": 50293,
+ "<|lv|>": 50301,
+ "<|mg|>": 50349,
+ "<|mi|>": 50295,
+ "<|mk|>": 50308,
+ "<|ml|>": 50296,
+ "<|mn|>": 50314,
+ "<|mr|>": 50320,
+ "<|ms|>": 50282,
+ "<|mt|>": 50343,
+ "<|my|>": 50346,
+ "<|ne|>": 50313,
+ "<|nl|>": 50271,
+ "<|nn|>": 50342,
+ "<|nocaptions|>": 50362,
+ "<|notimestamps|>": 50363,
+ "<|no|>": 50288,
+ "<|oc|>": 50328,
+ "<|pa|>": 50321,
+ "<|pl|>": 50269,
+ "<|ps|>": 50340,
+ "<|pt|>": 50267,
+ "<|ro|>": 50284,
+ "<|ru|>": 50263,
+ "<|sa|>": 50344,
+ "<|sd|>": 50332,
+ "<|si|>": 50322,
+ "<|sk|>": 50298,
+ "<|sl|>": 50305,
+ "<|sn|>": 50324,
+ "<|so|>": 50326,
+ "<|sq|>": 50317,
+ "<|sr|>": 50303,
+ "<|startoflm|>": 50360,
+ "<|startofprev|>": 50361,
+ "<|startoftranscript|>": 50258,
+ "<|su|>": 50357,
+ "<|sv|>": 50273,
+ "<|sw|>": 50318,
+ "<|ta|>": 50287,
+ "<|te|>": 50299,
+ "<|tg|>": 50331,
+ "<|th|>": 50289,
+ "<|tk|>": 50341,
+ "<|tl|>": 50348,
+ "<|transcribe|>": 50359,
+ "<|translate|>": 50358,
+ "<|tr|>": 50268,
+ "<|tt|>": 50351,
+ "<|uk|>": 50280,
+ "<|ur|>": 50290,
+ "<|uz|>": 50337,
+ "<|vi|>": 50278,
+ "<|yi|>": 50335,
+ "<|yo|>": 50325,
+ "<|zh|>": 50260
+ }
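
These are Whisper's special control tokens (language tags plus task and timestamp markers) layered on top of the base vocabulary; the ids match the openai/whisper-small tokenizer. A minimal sketch of how the mapping is resolved with the transformers tokenizer API — the tokenizer name is taken from config.json below, not from this repo:

```python
from transformers import WhisperTokenizer

# Any Whisper-small tokenizer ships this same added_tokens.json mapping.
tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-small")

# Language and task control tokens resolve to the ids listed above.
print(tokenizer.convert_tokens_to_ids("<|en|>"))            # 50259
print(tokenizer.convert_tokens_to_ids("<|transcribe|>"))    # 50359
print(tokenizer.convert_tokens_to_ids("<|notimestamps|>"))  # 50363

# The decoder prompt for "transcribe English without timestamps" is built from them:
# <|startoftranscript|> <|en|> <|transcribe|> <|notimestamps|>
```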
config.json ADDED
@@ -0,0 +1,41 @@
+ {
+ "_name_or_path": "openai/whisper-small",
+ "activation_dropout": 0.0,
+ "activation_function": "gelu",
+ "architectures": [
+ "WhisperForConditionalGeneration"
+ ],
+ "attention_dropout": 0.0,
+ "begin_suppress_tokens": [
+ 220,
+ 50257
+ ],
+ "bos_token_id": 50257,
+ "d_model": 768,
+ "decoder_attention_heads": 12,
+ "decoder_ffn_dim": 3072,
+ "decoder_layerdrop": 0.0,
+ "decoder_layers": 12,
+ "decoder_start_token_id": 50258,
+ "dropout": 0.0,
+ "encoder_attention_heads": 12,
+ "encoder_ffn_dim": 3072,
+ "encoder_layerdrop": 0.0,
+ "encoder_layers": 12,
+ "eos_token_id": 50257,
+ "forced_decoder_ids": null,
+ "init_std": 0.02,
+ "is_encoder_decoder": true,
+ "max_length": 448,
+ "max_source_positions": 1500,
+ "max_target_positions": 448,
+ "model_type": "whisper",
+ "num_hidden_layers": 12,
+ "num_mel_bins": 80,
+ "pad_token_id": 50257,
+ "scale_embedding": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.26.0.dev0",
+ "use_cache": false,
+ "vocab_size": 51865
+ }
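
This is the stock Whisper-small architecture (12 encoder and 12 decoder layers, d_model 768, 80 mel bins); use_cache is off and forced_decoder_ids is null because the checkpoint is still mid-training. A minimal sketch of loading such a checkpoint with transformers — the local directory name is a placeholder, not a path taken from this repo:

```python
from transformers import WhisperForConditionalGeneration, WhisperProcessor

# Placeholder directory: any folder holding this config.json plus the fine-tuned weights.
model = WhisperForConditionalGeneration.from_pretrained("./checkpoint-1000")
processor = WhisperProcessor.from_pretrained("openai/whisper-small")

# Training disabled the KV cache; turn it back on for faster generation.
model.config.use_cache = True

# 80 log-mel bins and up to 1500 encoder positions (30 s of 16 kHz audio).
assert model.config.num_mel_bins == 80
assert model.config.max_source_positions == 1500
```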
ds_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "fp16": {
+ "enabled": "auto",
+ "loss_scale": 0,
+ "loss_scale_window": 1000,
+ "initial_scale_power": 16,
+ "hysteresis": 2,
+ "min_loss_scale": 1
+ },
+
+ "optimizer": {
+ "type": "AdamW",
+ "params": {
+ "lr": "auto",
+ "betas": "auto",
+ "eps": "auto",
+ "weight_decay": "auto"
+ }
+ },
+
+ "scheduler": {
+ "type": "WarmupDecayLR",
+ "params": {
+ "last_batch_iteration": -1,
+ "total_num_steps": "auto",
+ "warmup_min_lr": "auto",
+ "warmup_max_lr": "auto",
+ "warmup_num_steps": "auto"
+ }
+ },
+
+ "zero_optimization": {
+ "stage": 2,
+ "offload_optimizer": {
+ "device": "cpu",
+ "pin_memory": true
+ },
+ "allgather_partitions": true,
+ "allgather_bucket_size": 2e8,
+ "overlap_comm": true,
+ "reduce_scatter": true,
+ "reduce_bucket_size": 2e8,
+ "contiguous_gradients": true
+ },
+
+ "gradient_accumulation_steps": "auto",
+ "gradient_clipping": "auto",
+ "train_batch_size": "auto",
+ "train_micro_batch_size_per_gpu": "auto"
+ }
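
Every "auto" value here is filled in by the Hugging Face Trainer's DeepSpeed integration from the matching training argument, so the optimizer, scheduler, and batch sizes are defined in one place; the config itself only pins fp16 loss scaling and ZeRO stage 2 with CPU optimizer offload. A hedged sketch of how such a config is typically wired in (the argument values are illustrative, not recovered from this run):

```python
from transformers import Seq2SeqTrainingArguments

# Illustrative values only: DeepSpeed substitutes each "auto" field in ds_config.json
# with the corresponding Trainer argument (learning rate, warmup, batch size, clipping).
training_args = Seq2SeqTrainingArguments(
    output_dir="./",
    per_device_train_batch_size=32,
    learning_rate=1e-5,
    warmup_steps=500,
    max_steps=5000,
    fp16=True,                   # pairs with the "fp16" block above
    save_steps=1000,             # consistent with the step-1000 checkpoint in this commit
    deepspeed="ds_config.json",  # ZeRO stage 2 with CPU optimizer offload
)
```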
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
normalizer.json ADDED
@@ -0,0 +1,1742 @@
1
+ {
2
+ "accessorise": "accessorize",
3
+ "accessorised": "accessorized",
4
+ "accessorises": "accessorizes",
5
+ "accessorising": "accessorizing",
6
+ "acclimatisation": "acclimatization",
7
+ "acclimatise": "acclimatize",
8
+ "acclimatised": "acclimatized",
9
+ "acclimatises": "acclimatizes",
10
+ "acclimatising": "acclimatizing",
11
+ "accoutrements": "accouterments",
12
+ "aeon": "eon",
13
+ "aeons": "eons",
14
+ "aerogramme": "aerogram",
15
+ "aerogrammes": "aerograms",
16
+ "aeroplane": "airplane",
17
+ "aeroplanes": "airplanes",
18
+ "aesthete": "esthete",
19
+ "aesthetes": "esthetes",
20
+ "aesthetic": "esthetic",
21
+ "aesthetically": "esthetically",
22
+ "aesthetics": "esthetics",
23
+ "aetiology": "etiology",
24
+ "ageing": "aging",
25
+ "aggrandisement": "aggrandizement",
26
+ "agonise": "agonize",
27
+ "agonised": "agonized",
28
+ "agonises": "agonizes",
29
+ "agonising": "agonizing",
30
+ "agonisingly": "agonizingly",
31
+ "almanack": "almanac",
32
+ "almanacks": "almanacs",
33
+ "aluminium": "aluminum",
34
+ "amortisable": "amortizable",
35
+ "amortisation": "amortization",
36
+ "amortisations": "amortizations",
37
+ "amortise": "amortize",
38
+ "amortised": "amortized",
39
+ "amortises": "amortizes",
40
+ "amortising": "amortizing",
41
+ "amphitheatre": "amphitheater",
42
+ "amphitheatres": "amphitheaters",
43
+ "anaemia": "anemia",
44
+ "anaemic": "anemic",
45
+ "anaesthesia": "anesthesia",
46
+ "anaesthetic": "anesthetic",
47
+ "anaesthetics": "anesthetics",
48
+ "anaesthetise": "anesthetize",
49
+ "anaesthetised": "anesthetized",
50
+ "anaesthetises": "anesthetizes",
51
+ "anaesthetising": "anesthetizing",
52
+ "anaesthetist": "anesthetist",
53
+ "anaesthetists": "anesthetists",
54
+ "anaesthetize": "anesthetize",
55
+ "anaesthetized": "anesthetized",
56
+ "anaesthetizes": "anesthetizes",
57
+ "anaesthetizing": "anesthetizing",
58
+ "analogue": "analog",
59
+ "analogues": "analogs",
60
+ "analyse": "analyze",
61
+ "analysed": "analyzed",
62
+ "analyses": "analyzes",
63
+ "analysing": "analyzing",
64
+ "anglicise": "anglicize",
65
+ "anglicised": "anglicized",
66
+ "anglicises": "anglicizes",
67
+ "anglicising": "anglicizing",
68
+ "annualised": "annualized",
69
+ "antagonise": "antagonize",
70
+ "antagonised": "antagonized",
71
+ "antagonises": "antagonizes",
72
+ "antagonising": "antagonizing",
73
+ "apologise": "apologize",
74
+ "apologised": "apologized",
75
+ "apologises": "apologizes",
76
+ "apologising": "apologizing",
77
+ "appal": "appall",
78
+ "appals": "appalls",
79
+ "appetiser": "appetizer",
80
+ "appetisers": "appetizers",
81
+ "appetising": "appetizing",
82
+ "appetisingly": "appetizingly",
83
+ "arbour": "arbor",
84
+ "arbours": "arbors",
85
+ "archaeologically": "archeologically",
86
+ "archaeologist": "archeologist",
87
+ "archaeologists": "archeologists",
88
+ "archaeology": "archeology</span>",
89
+ "archeological": "archaeological",
90
+ "ardour": "ardor",
91
+ "armour": "armor",
92
+ "armoured": "armored",
93
+ "armourer": "armorer",
94
+ "armourers": "armorers",
95
+ "armouries": "armories",
96
+ "armoury": "armory",
97
+ "artefact": "artifact",
98
+ "artefacts": "artifacts",
99
+ "authorise": "authorize",
100
+ "authorised": "authorized",
101
+ "authorises": "authorizes",
102
+ "authorising": "authorizing",
103
+ "axe": "ax",
104
+ "backpedalled": "backpedaled",
105
+ "backpedalling": "backpedaling",
106
+ "bannister": "banister",
107
+ "bannisters": "banisters",
108
+ "baptise": "baptize",
109
+ "baptised": "baptized",
110
+ "baptises": "baptizes",
111
+ "baptising": "baptizing",
112
+ "bastardise": "bastardize",
113
+ "bastardised": "bastardized",
114
+ "bastardises": "bastardizes",
115
+ "bastardising": "bastardizing",
116
+ "battleax": "battleaxe",
117
+ "baulk": "balk",
118
+ "baulked": "balked",
119
+ "baulking": "balking",
120
+ "baulks": "balks",
121
+ "bedevilled": "bedeviled",
122
+ "bedevilling": "bedeviling",
123
+ "behaviour": "behavior",
124
+ "behavioural": "behavioral",
125
+ "behaviourism": "behaviorism",
126
+ "behaviourist": "behaviorist",
127
+ "behaviourists": "behaviorists",
128
+ "behaviours": "behaviors",
129
+ "behove": "behoove",
130
+ "behoved": "behooved",
131
+ "behoves": "behooves",
132
+ "bejewelled": "bejeweled",
133
+ "belabour": "belabor",
134
+ "belaboured": "belabored",
135
+ "belabouring": "belaboring",
136
+ "belabours": "belabors",
137
+ "bevelled": "beveled",
138
+ "bevvies": "bevies",
139
+ "bevvy": "bevy",
140
+ "biassed": "biased",
141
+ "biassing": "biasing",
142
+ "bingeing": "binging",
143
+ "bougainvillaea": "bougainvillea",
144
+ "bougainvillaeas": "bougainvilleas",
145
+ "bowdlerise": "bowdlerize",
146
+ "bowdlerised": "bowdlerized",
147
+ "bowdlerises": "bowdlerizes",
148
+ "bowdlerising": "bowdlerizing",
149
+ "breathalyse": "breathalyze",
150
+ "breathalysed": "breathalyzed",
151
+ "breathalyser": "breathalyzer",
152
+ "breathalysers": "breathalyzers",
153
+ "breathalyses": "breathalyzes",
154
+ "breathalysing": "breathalyzing",
155
+ "brutalise": "brutalize",
156
+ "brutalised": "brutalized",
157
+ "brutalises": "brutalizes",
158
+ "brutalising": "brutalizing",
159
+ "busses": "buses",
160
+ "bussing": "busing",
161
+ "caesarean": "cesarean",
162
+ "caesareans": "cesareans",
163
+ "calibre": "caliber",
164
+ "calibres": "calibers",
165
+ "calliper": "caliper",
166
+ "callipers": "calipers",
167
+ "callisthenics": "calisthenics",
168
+ "canalise": "canalize",
169
+ "canalised": "canalized",
170
+ "canalises": "canalizes",
171
+ "canalising": "canalizing",
172
+ "cancelation": "cancellation",
173
+ "cancelations": "cancellations",
174
+ "cancelled": "canceled",
175
+ "cancelling": "canceling",
176
+ "candour": "candor",
177
+ "cannibalise": "cannibalize",
178
+ "cannibalised": "cannibalized",
179
+ "cannibalises": "cannibalizes",
180
+ "cannibalising": "cannibalizing",
181
+ "canonise": "canonize",
182
+ "canonised": "canonized",
183
+ "canonises": "canonizes",
184
+ "canonising": "canonizing",
185
+ "capitalise": "capitalize",
186
+ "capitalised": "capitalized",
187
+ "capitalises": "capitalizes",
188
+ "capitalising": "capitalizing",
189
+ "caramelise": "caramelize",
190
+ "caramelised": "caramelized",
191
+ "caramelises": "caramelizes",
192
+ "caramelising": "caramelizing",
193
+ "carbonise": "carbonize",
194
+ "carbonised": "carbonized",
195
+ "carbonises": "carbonizes",
196
+ "carbonising": "carbonizing",
197
+ "carolled": "caroled",
198
+ "carolling": "caroling",
199
+ "catalogue": "catalog",
200
+ "catalogued": "cataloged",
201
+ "catalogues": "catalogs",
202
+ "cataloguing": "cataloging",
203
+ "catalyse": "catalyze",
204
+ "catalysed": "catalyzed",
205
+ "catalyses": "catalyzes",
206
+ "catalysing": "catalyzing",
207
+ "categorise": "categorize",
208
+ "categorised": "categorized",
209
+ "categorises": "categorizes",
210
+ "categorising": "categorizing",
211
+ "cauterise": "cauterize",
212
+ "cauterised": "cauterized",
213
+ "cauterises": "cauterizes",
214
+ "cauterising": "cauterizing",
215
+ "cavilled": "caviled",
216
+ "cavilling": "caviling",
217
+ "centigramme": "centigram",
218
+ "centigrammes": "centigrams",
219
+ "centilitre": "centiliter",
220
+ "centilitres": "centiliters",
221
+ "centimetre": "centimeter",
222
+ "centimetres": "centimeters",
223
+ "centralise": "centralize",
224
+ "centralised": "centralized",
225
+ "centralises": "centralizes",
226
+ "centralising": "centralizing",
227
+ "centre": "center",
228
+ "centred": "centered",
229
+ "centrefold": "centerfold",
230
+ "centrefolds": "centerfolds",
231
+ "centrepiece": "centerpiece",
232
+ "centrepieces": "centerpieces",
233
+ "centres": "centers",
234
+ "channelled": "channeled",
235
+ "channelling": "channeling",
236
+ "characterise": "characterize",
237
+ "characterised": "characterized",
238
+ "characterises": "characterizes",
239
+ "characterising": "characterizing",
240
+ "cheque": "check",
241
+ "chequebook": "checkbook",
242
+ "chequebooks": "checkbooks",
243
+ "chequered": "checkered",
244
+ "cheques": "checks",
245
+ "chilli": "chili",
246
+ "chimaera": "chimera",
247
+ "chimaeras": "chimeras",
248
+ "chiselled": "chiseled",
249
+ "chiselling": "chiseling",
250
+ "circularise": "circularize",
251
+ "circularised": "circularized",
252
+ "circularises": "circularizes",
253
+ "circularising": "circularizing",
254
+ "civilise": "civilize",
255
+ "civilised": "civilized",
256
+ "civilises": "civilizes",
257
+ "civilising": "civilizing",
258
+ "clamour": "clamor",
259
+ "clamoured": "clamored",
260
+ "clamouring": "clamoring",
261
+ "clamours": "clamors",
262
+ "clangour": "clangor",
263
+ "clarinettist": "clarinetist",
264
+ "clarinettists": "clarinetists",
265
+ "collectivise": "collectivize",
266
+ "collectivised": "collectivized",
267
+ "collectivises": "collectivizes",
268
+ "collectivising": "collectivizing",
269
+ "colonisation": "colonization",
270
+ "colonise": "colonize",
271
+ "colonised": "colonized",
272
+ "coloniser": "colonizer",
273
+ "colonisers": "colonizers",
274
+ "colonises": "colonizes",
275
+ "colonising": "colonizing",
276
+ "colour": "color",
277
+ "colourant": "colorant",
278
+ "colourants": "colorants",
279
+ "coloured": "colored",
280
+ "coloureds": "coloreds",
281
+ "colourful": "colorful",
282
+ "colourfully": "colorfully",
283
+ "colouring": "coloring",
284
+ "colourize": "colorize",
285
+ "colourized": "colorized",
286
+ "colourizes": "colorizes",
287
+ "colourizing": "colorizing",
288
+ "colourless": "colorless",
289
+ "colours": "colors",
290
+ "commercialise": "commercialize",
291
+ "commercialised": "commercialized",
292
+ "commercialises": "commercializes",
293
+ "commercialising": "commercializing",
294
+ "compartmentalise": "compartmentalize",
295
+ "compartmentalised": "compartmentalized",
296
+ "compartmentalises": "compartmentalizes",
297
+ "compartmentalising": "compartmentalizing",
298
+ "computerise": "computerize",
299
+ "computerised": "computerized",
300
+ "computerises": "computerizes",
301
+ "computerising": "computerizing",
302
+ "conceptualise": "conceptualize",
303
+ "conceptualised": "conceptualized",
304
+ "conceptualises": "conceptualizes",
305
+ "conceptualising": "conceptualizing",
306
+ "connexion": "connection",
307
+ "connexions": "connections",
308
+ "contextualise": "contextualize",
309
+ "contextualised": "contextualized",
310
+ "contextualises": "contextualizes",
311
+ "contextualising": "contextualizing",
312
+ "cosier": "cozier",
313
+ "cosies": "cozies",
314
+ "cosiest": "coziest",
315
+ "cosily": "cozily",
316
+ "cosiness": "coziness",
317
+ "cosy": "cozy",
318
+ "councillor": "councilor",
319
+ "councillors": "councilors",
320
+ "counselled": "counseled",
321
+ "counselling": "counseling",
322
+ "counsellor": "counselor",
323
+ "counsellors": "counselors",
324
+ "crenelated": "crenellated",
325
+ "criminalise": "criminalize",
326
+ "criminalised": "criminalized",
327
+ "criminalises": "criminalizes",
328
+ "criminalising": "criminalizing",
329
+ "criticise": "criticize",
330
+ "criticised": "criticized",
331
+ "criticises": "criticizes",
332
+ "criticising": "criticizing",
333
+ "crueller": "crueler",
334
+ "cruellest": "cruelest",
335
+ "crystallisation": "crystallization",
336
+ "crystallise": "crystallize",
337
+ "crystallised": "crystallized",
338
+ "crystallises": "crystallizes",
339
+ "crystallising": "crystallizing",
340
+ "cudgelled": "cudgeled",
341
+ "cudgelling": "cudgeling",
342
+ "customise": "customize",
343
+ "customised": "customized",
344
+ "customises": "customizes",
345
+ "customising": "customizing",
346
+ "cypher": "cipher",
347
+ "cyphers": "ciphers",
348
+ "decentralisation": "decentralization",
349
+ "decentralise": "decentralize",
350
+ "decentralised": "decentralized",
351
+ "decentralises": "decentralizes",
352
+ "decentralising": "decentralizing",
353
+ "decriminalisation": "decriminalization",
354
+ "decriminalise": "decriminalize",
355
+ "decriminalised": "decriminalized",
356
+ "decriminalises": "decriminalizes",
357
+ "decriminalising": "decriminalizing",
358
+ "defence": "defense",
359
+ "defenceless": "defenseless",
360
+ "defences": "defenses",
361
+ "dehumanisation": "dehumanization",
362
+ "dehumanise": "dehumanize",
363
+ "dehumanised": "dehumanized",
364
+ "dehumanises": "dehumanizes",
365
+ "dehumanising": "dehumanizing",
366
+ "demeanour": "demeanor",
367
+ "demilitarisation": "demilitarization",
368
+ "demilitarise": "demilitarize",
369
+ "demilitarised": "demilitarized",
370
+ "demilitarises": "demilitarizes",
371
+ "demilitarising": "demilitarizing",
372
+ "demobilisation": "demobilization",
373
+ "demobilise": "demobilize",
374
+ "demobilised": "demobilized",
375
+ "demobilises": "demobilizes",
376
+ "demobilising": "demobilizing",
377
+ "democratisation": "democratization",
378
+ "democratise": "democratize",
379
+ "democratised": "democratized",
380
+ "democratises": "democratizes",
381
+ "democratising": "democratizing",
382
+ "demonise": "demonize",
383
+ "demonised": "demonized",
384
+ "demonises": "demonizes",
385
+ "demonising": "demonizing",
386
+ "demoralisation": "demoralization",
387
+ "demoralise": "demoralize",
388
+ "demoralised": "demoralized",
389
+ "demoralises": "demoralizes",
390
+ "demoralising": "demoralizing",
391
+ "denationalisation": "denationalization",
392
+ "denationalise": "denationalize",
393
+ "denationalised": "denationalized",
394
+ "denationalises": "denationalizes",
395
+ "denationalising": "denationalizing",
396
+ "deodorise": "deodorize",
397
+ "deodorised": "deodorized",
398
+ "deodorises": "deodorizes",
399
+ "deodorising": "deodorizing",
400
+ "depersonalise": "depersonalize",
401
+ "depersonalised": "depersonalized",
402
+ "depersonalises": "depersonalizes",
403
+ "depersonalising": "depersonalizing",
404
+ "deputise": "deputize",
405
+ "deputised": "deputized",
406
+ "deputises": "deputizes",
407
+ "deputising": "deputizing",
408
+ "desensitisation": "desensitization",
409
+ "desensitise": "desensitize",
410
+ "desensitised": "desensitized",
411
+ "desensitises": "desensitizes",
412
+ "desensitising": "desensitizing",
413
+ "destabilisation": "destabilization",
414
+ "destabilise": "destabilize",
415
+ "destabilised": "destabilized",
416
+ "destabilises": "destabilizes",
417
+ "destabilising": "destabilizing",
418
+ "dialled": "dialed",
419
+ "dialling": "dialing",
420
+ "dialogue": "dialog",
421
+ "dialogues": "dialogs",
422
+ "diarrhoea": "diarrhea",
423
+ "digitise": "digitize",
424
+ "digitised": "digitized",
425
+ "digitises": "digitizes",
426
+ "digitising": "digitizing",
427
+ "disc": "disk",
428
+ "discolour": "discolor",
429
+ "discoloured": "discolored",
430
+ "discolouring": "discoloring",
431
+ "discolours": "discolors",
432
+ "discs": "disks",
433
+ "disembowelled": "disemboweled",
434
+ "disembowelling": "disemboweling",
435
+ "disfavour": "disfavor",
436
+ "dishevelled": "disheveled",
437
+ "dishonour": "dishonor",
438
+ "dishonourable": "dishonorable",
439
+ "dishonourably": "dishonorably",
440
+ "dishonoured": "dishonored",
441
+ "dishonouring": "dishonoring",
442
+ "dishonours": "dishonors",
443
+ "disorganisation": "disorganization",
444
+ "disorganised": "disorganized",
445
+ "distil": "distill",
446
+ "distils": "distills",
447
+ "dramatisation": "dramatization",
448
+ "dramatisations": "dramatizations",
449
+ "dramatise": "dramatize",
450
+ "dramatised": "dramatized",
451
+ "dramatises": "dramatizes",
452
+ "dramatising": "dramatizing",
453
+ "draught": "draft",
454
+ "draughtboard": "draftboard",
455
+ "draughtboards": "draftboards",
456
+ "draughtier": "draftier",
457
+ "draughtiest": "draftiest",
458
+ "draughts": "drafts",
459
+ "draughtsman": "draftsman",
460
+ "draughtsmanship": "draftsmanship",
461
+ "draughtsmen": "draftsmen",
462
+ "draughtswoman": "draftswoman",
463
+ "draughtswomen": "draftswomen",
464
+ "draughty": "drafty",
465
+ "drivelled": "driveled",
466
+ "drivelling": "driveling",
467
+ "duelled": "dueled",
468
+ "duelling": "dueling",
469
+ "economise": "economize",
470
+ "economised": "economized",
471
+ "economises": "economizes",
472
+ "economising": "economizing",
473
+ "editorialise": "editorialize",
474
+ "editorialised": "editorialized",
475
+ "editorialises": "editorializes",
476
+ "editorialising": "editorializing",
477
+ "edoema": "edema",
478
+ "empathise": "empathize",
479
+ "empathised": "empathized",
480
+ "empathises": "empathizes",
481
+ "empathising": "empathizing",
482
+ "emphasise": "emphasize",
483
+ "emphasised": "emphasized",
484
+ "emphasises": "emphasizes",
485
+ "emphasising": "emphasizing",
486
+ "enamelled": "enameled",
487
+ "enamelling": "enameling",
488
+ "enamoured": "enamored",
489
+ "encyclopaedia": "encyclopedia",
490
+ "encyclopaedias": "encyclopedias",
491
+ "encyclopaedic": "encyclopedic",
492
+ "endeavour": "endeavor",
493
+ "endeavoured": "endeavored",
494
+ "endeavouring": "endeavoring",
495
+ "endeavours": "endeavors",
496
+ "energise": "energize",
497
+ "energised": "energized",
498
+ "energises": "energizes",
499
+ "energising": "energizing",
500
+ "enrol": "enroll",
501
+ "enrols": "enrolls",
502
+ "enthral": "enthrall",
503
+ "enthrals": "enthralls",
504
+ "epaulette": "epaulet",
505
+ "epaulettes": "epaulets",
506
+ "epicentre": "epicenter",
507
+ "epicentres": "epicenters",
508
+ "epilogue": "epilog",
509
+ "epilogues": "epilogs",
510
+ "epitomise": "epitomize",
511
+ "epitomised": "epitomized",
512
+ "epitomises": "epitomizes",
513
+ "epitomising": "epitomizing",
514
+ "equalisation": "equalization",
515
+ "equalise": "equalize",
516
+ "equalised": "equalized",
517
+ "equaliser": "equalizer",
518
+ "equalisers": "equalizers",
519
+ "equalises": "equalizes",
520
+ "equalising": "equalizing",
521
+ "eulogise": "eulogize",
522
+ "eulogised": "eulogized",
523
+ "eulogises": "eulogizes",
524
+ "eulogising": "eulogizing",
525
+ "evangelise": "evangelize",
526
+ "evangelised": "evangelized",
527
+ "evangelises": "evangelizes",
528
+ "evangelising": "evangelizing",
529
+ "exorcise": "exorcize",
530
+ "exorcised": "exorcized",
531
+ "exorcises": "exorcizes",
532
+ "exorcising": "exorcizing",
533
+ "extemporisation": "extemporization",
534
+ "extemporise": "extemporize",
535
+ "extemporised": "extemporized",
536
+ "extemporises": "extemporizes",
537
+ "extemporising": "extemporizing",
538
+ "externalisation": "externalization",
539
+ "externalisations": "externalizations",
540
+ "externalise": "externalize",
541
+ "externalised": "externalized",
542
+ "externalises": "externalizes",
543
+ "externalising": "externalizing",
544
+ "factorise": "factorize",
545
+ "factorised": "factorized",
546
+ "factorises": "factorizes",
547
+ "factorising": "factorizing",
548
+ "faecal": "fecal",
549
+ "faeces": "feces",
550
+ "familiarisation": "familiarization",
551
+ "familiarise": "familiarize",
552
+ "familiarised": "familiarized",
553
+ "familiarises": "familiarizes",
554
+ "familiarising": "familiarizing",
555
+ "fantasise": "fantasize",
556
+ "fantasised": "fantasized",
557
+ "fantasises": "fantasizes",
558
+ "fantasising": "fantasizing",
559
+ "favour": "favor",
560
+ "favourable": "favorable",
561
+ "favourably": "favorably",
562
+ "favoured": "favored",
563
+ "favouring": "favoring",
564
+ "favourite": "favorite",
565
+ "favourites": "favorites",
566
+ "favouritism": "favoritism",
567
+ "favours": "favors",
568
+ "feminise": "feminize",
569
+ "feminised": "feminized",
570
+ "feminises": "feminizes",
571
+ "feminising": "feminizing",
572
+ "fertilisation": "fertilization",
573
+ "fertilise": "fertilize",
574
+ "fertilised": "fertilized",
575
+ "fertiliser": "fertilizer",
576
+ "fertilisers": "fertilizers",
577
+ "fertilises": "fertilizes",
578
+ "fertilising": "fertilizing",
579
+ "fervour": "fervor",
580
+ "fibre": "fiber",
581
+ "fibreglass": "fiberglass",
582
+ "fibres": "fibers",
583
+ "fictionalisation": "fictionalization",
584
+ "fictionalisations": "fictionalizations",
585
+ "fictionalise": "fictionalize",
586
+ "fictionalised": "fictionalized",
587
+ "fictionalises": "fictionalizes",
588
+ "fictionalising": "fictionalizing",
589
+ "fillet": "filet",
590
+ "filleted": "fileted",
591
+ "filleting": "fileting",
592
+ "fillets": "filets",
593
+ "finalisation": "finalization",
594
+ "finalise": "finalize",
595
+ "finalised": "finalized",
596
+ "finalises": "finalizes",
597
+ "finalising": "finalizing",
598
+ "flautist": "flutist",
599
+ "flautists": "flutists",
600
+ "flavour": "flavor",
601
+ "flavoured": "flavored",
602
+ "flavouring": "flavoring",
603
+ "flavourings": "flavorings",
604
+ "flavourless": "flavorless",
605
+ "flavours": "flavors",
606
+ "flavoursome": "flavorsome",
607
+ "flyer / flier": "flier / flyer",
608
+ "foetal": "fetal",
609
+ "foetid": "fetid",
610
+ "foetus": "fetus",
611
+ "foetuses": "fetuses",
612
+ "formalisation": "formalization",
613
+ "formalise": "formalize",
614
+ "formalised": "formalized",
615
+ "formalises": "formalizes",
616
+ "formalising": "formalizing",
617
+ "fossilisation": "fossilization",
618
+ "fossilise": "fossilize",
619
+ "fossilised": "fossilized",
620
+ "fossilises": "fossilizes",
621
+ "fossilising": "fossilizing",
622
+ "fraternisation": "fraternization",
623
+ "fraternise": "fraternize",
624
+ "fraternised": "fraternized",
625
+ "fraternises": "fraternizes",
626
+ "fraternising": "fraternizing",
627
+ "fulfil": "fulfill",
628
+ "fulfilment": "fulfillment",
629
+ "fulfils": "fulfills",
630
+ "funnelled": "funneled",
631
+ "funnelling": "funneling",
632
+ "gage": "gauge",
633
+ "gaged": "gauged",
634
+ "gages": "gauges",
635
+ "gaging": "gauging",
636
+ "galvanise": "galvanize",
637
+ "galvanised": "galvanized",
638
+ "galvanises": "galvanizes",
639
+ "galvanising": "galvanizing",
640
+ "gambolled": "gamboled",
641
+ "gambolling": "gamboling",
642
+ "gaol": "jail",
643
+ "gaolbird": "jailbird",
644
+ "gaolbirds": "jailbirds",
645
+ "gaolbreak": "jailbreak",
646
+ "gaolbreaks": "jailbreaks",
647
+ "gaoled": "jailed",
648
+ "gaoler": "jailer",
649
+ "gaolers": "jailers",
650
+ "gaoling": "jailing",
651
+ "gaols": "jails",
652
+ "gasses": "gases",
653
+ "generalisation": "generalization",
654
+ "generalisations": "generalizations",
655
+ "generalise": "generalize",
656
+ "generalised": "generalized",
657
+ "generalises": "generalizes",
658
+ "generalising": "generalizing",
659
+ "ghettoise": "ghettoize",
660
+ "ghettoised": "ghettoized",
661
+ "ghettoises": "ghettoizes",
662
+ "ghettoising": "ghettoizing",
663
+ "gipsies": "gypsies",
664
+ "glamor": "glamour",
665
+ "glamorise": "glamorize",
666
+ "glamorised": "glamorized",
667
+ "glamorises": "glamorizes",
668
+ "glamorising": "glamorizing",
669
+ "globalisation": "globalization",
670
+ "globalise": "globalize",
671
+ "globalised": "globalized",
672
+ "globalises": "globalizes",
673
+ "globalising": "globalizing",
674
+ "glueing": "gluing",
675
+ "goitre": "goiter",
676
+ "goitres": "goiters",
677
+ "gonorrhoea": "gonorrhea",
678
+ "gramme": "gram",
679
+ "grammes": "grams",
680
+ "gravelled": "graveled",
681
+ "grey": "gray",
682
+ "greyed": "grayed",
683
+ "greying": "graying",
684
+ "greyish": "grayish",
685
+ "greyness": "grayness",
686
+ "greys": "grays",
687
+ "grovelled": "groveled",
688
+ "grovelling": "groveling",
689
+ "groyne": "groin",
690
+ "groynes": "groins",
691
+ "gruelling": "grueling",
692
+ "gruellingly": "gruelingly",
693
+ "gryphon": "griffin",
694
+ "gryphons": "griffins",
695
+ "gynaecological": "gynecological",
696
+ "gynaecologist": "gynecologist",
697
+ "gynaecologists": "gynecologists",
698
+ "gynaecology": "gynecology",
699
+ "haematological": "hematological",
700
+ "haematologist": "hematologist",
701
+ "haematologists": "hematologists",
702
+ "haematology": "hematology",
703
+ "haemoglobin": "hemoglobin",
704
+ "haemophilia": "hemophilia",
705
+ "haemophiliac": "hemophiliac",
706
+ "haemophiliacs": "hemophiliacs",
707
+ "haemorrhage": "hemorrhage",
708
+ "haemorrhaged": "hemorrhaged",
709
+ "haemorrhages": "hemorrhages",
710
+ "haemorrhaging": "hemorrhaging",
711
+ "haemorrhoids": "hemorrhoids",
712
+ "harbour": "harbor",
713
+ "harboured": "harbored",
714
+ "harbouring": "harboring",
715
+ "harbours": "harbors",
716
+ "harmonisation": "harmonization",
717
+ "harmonise": "harmonize",
718
+ "harmonised": "harmonized",
719
+ "harmonises": "harmonizes",
720
+ "harmonising": "harmonizing",
721
+ "homoeopath": "homeopath",
722
+ "homoeopathic": "homeopathic",
723
+ "homoeopaths": "homeopaths",
724
+ "homoeopathy": "homeopathy",
725
+ "homogenise": "homogenize",
726
+ "homogenised": "homogenized",
727
+ "homogenises": "homogenizes",
728
+ "homogenising": "homogenizing",
729
+ "honour": "honor",
730
+ "honourable": "honorable",
731
+ "honourably": "honorably",
732
+ "honoured": "honored",
733
+ "honouring": "honoring",
734
+ "honours": "honors",
735
+ "hospitalisation": "hospitalization",
736
+ "hospitalise": "hospitalize",
737
+ "hospitalised": "hospitalized",
738
+ "hospitalises": "hospitalizes",
739
+ "hospitalising": "hospitalizing",
740
+ "humanise": "humanize",
741
+ "humanised": "humanized",
742
+ "humanises": "humanizes",
743
+ "humanising": "humanizing",
744
+ "humour": "humor",
745
+ "humoured": "humored",
746
+ "humouring": "humoring",
747
+ "humourless": "humorless",
748
+ "humours": "humors",
749
+ "hybridise": "hybridize",
750
+ "hybridised": "hybridized",
751
+ "hybridises": "hybridizes",
752
+ "hybridising": "hybridizing",
753
+ "hypnotise": "hypnotize",
754
+ "hypnotised": "hypnotized",
755
+ "hypnotises": "hypnotizes",
756
+ "hypnotising": "hypnotizing",
757
+ "hypothesise": "hypothesize",
758
+ "hypothesised": "hypothesized",
759
+ "hypothesises": "hypothesizes",
760
+ "hypothesising": "hypothesizing",
761
+ "idealisation": "idealization",
762
+ "idealise": "idealize",
763
+ "idealised": "idealized",
764
+ "idealises": "idealizes",
765
+ "idealising": "idealizing",
766
+ "idolise": "idolize",
767
+ "idolised": "idolized",
768
+ "idolises": "idolizes",
769
+ "idolising": "idolizing",
770
+ "immobilisation": "immobilization",
771
+ "immobilise": "immobilize",
772
+ "immobilised": "immobilized",
773
+ "immobiliser": "immobilizer",
774
+ "immobilisers": "immobilizers",
775
+ "immobilises": "immobilizes",
776
+ "immobilising": "immobilizing",
777
+ "immortalise": "immortalize",
778
+ "immortalised": "immortalized",
779
+ "immortalises": "immortalizes",
780
+ "immortalising": "immortalizing",
781
+ "immunisation": "immunization",
782
+ "immunise": "immunize",
783
+ "immunised": "immunized",
784
+ "immunises": "immunizes",
785
+ "immunising": "immunizing",
786
+ "impanelled": "impaneled",
787
+ "impanelling": "impaneling",
788
+ "imperilled": "imperiled",
789
+ "imperilling": "imperiling",
790
+ "individualise": "individualize",
791
+ "individualised": "individualized",
792
+ "individualises": "individualizes",
793
+ "individualising": "individualizing",
794
+ "industrialise": "industrialize",
795
+ "industrialised": "industrialized",
796
+ "industrialises": "industrializes",
797
+ "industrialising": "industrializing",
798
+ "inflexion": "inflection",
799
+ "inflexions": "inflections",
800
+ "initialise": "initialize",
801
+ "initialised": "initialized",
802
+ "initialises": "initializes",
803
+ "initialising": "initializing",
804
+ "initialled": "initialed",
805
+ "initialling": "initialing",
806
+ "instal": "install",
807
+ "instalment": "installment",
808
+ "instalments": "installments",
809
+ "instals": "installs",
810
+ "instil": "instill",
811
+ "instils": "instills",
812
+ "institutionalisation": "institutionalization",
813
+ "institutionalise": "institutionalize",
814
+ "institutionalised": "institutionalized",
815
+ "institutionalises": "institutionalizes",
816
+ "institutionalising": "institutionalizing",
817
+ "intellectualise": "intellectualize",
818
+ "intellectualised": "intellectualized",
819
+ "intellectualises": "intellectualizes",
820
+ "intellectualising": "intellectualizing",
821
+ "internalisation": "internalization",
822
+ "internalise": "internalize",
823
+ "internalised": "internalized",
824
+ "internalises": "internalizes",
825
+ "internalising": "internalizing",
826
+ "internationalisation": "internationalization",
827
+ "internationalise": "internationalize",
828
+ "internationalised": "internationalized",
829
+ "internationalises": "internationalizes",
830
+ "internationalising": "internationalizing",
831
+ "ionisation": "ionization",
832
+ "ionise": "ionize",
833
+ "ionised": "ionized",
834
+ "ioniser": "ionizer",
835
+ "ionisers": "ionizers",
836
+ "ionises": "ionizes",
837
+ "ionising": "ionizing",
838
+ "italicise": "italicize",
839
+ "italicised": "italicized",
840
+ "italicises": "italicizes",
841
+ "italicising": "italicizing",
842
+ "itemise": "itemize",
843
+ "itemised": "itemized",
844
+ "itemises": "itemizes",
845
+ "itemising": "itemizing",
846
+ "jeopardise": "jeopardize",
847
+ "jeopardised": "jeopardized",
848
+ "jeopardises": "jeopardizes",
849
+ "jeopardising": "jeopardizing",
850
+ "jewelled": "jeweled",
851
+ "jeweller": "jeweler",
852
+ "jewellers": "jewelers",
853
+ "jewellery": "jewelry",
854
+ "judgement": "judgment",
855
+ "kilogramme": "kilogram",
856
+ "kilogrammes": "kilograms",
857
+ "kilometre": "kilometer",
858
+ "kilometres": "kilometers",
859
+ "labelled": "labeled",
860
+ "labelling": "labeling",
861
+ "labour": "labor",
862
+ "laboured": "labored",
863
+ "labourer": "laborer",
864
+ "labourers": "laborers",
865
+ "labouring": "laboring",
866
+ "labours": "labors",
867
+ "lacklustre": "lackluster",
868
+ "legalisation": "legalization",
869
+ "legalise": "legalize",
870
+ "legalised": "legalized",
871
+ "legalises": "legalizes",
872
+ "legalising": "legalizing",
873
+ "legitimise": "legitimize",
874
+ "legitimised": "legitimized",
875
+ "legitimises": "legitimizes",
876
+ "legitimising": "legitimizing",
877
+ "leukaemia": "leukemia",
878
+ "levelled": "leveled",
879
+ "leveller": "leveler",
880
+ "levellers": "levelers",
881
+ "levelling": "leveling",
882
+ "libelled": "libeled",
883
+ "libelling": "libeling",
884
+ "libellous": "libelous",
885
+ "liberalisation": "liberalization",
886
+ "liberalise": "liberalize",
887
+ "liberalised": "liberalized",
888
+ "liberalises": "liberalizes",
889
+ "liberalising": "liberalizing",
890
+ "licence": "license",
891
+ "licenced": "licensed",
892
+ "licences": "licenses",
893
+ "licencing": "licensing",
894
+ "likeable": "likable",
895
+ "lionisation": "lionization",
896
+ "lionise": "lionize",
897
+ "lionised": "lionized",
898
+ "lionises": "lionizes",
899
+ "lionising": "lionizing",
900
+ "liquidise": "liquidize",
901
+ "liquidised": "liquidized",
902
+ "liquidiser": "liquidizer",
903
+ "liquidisers": "liquidizers",
904
+ "liquidises": "liquidizes",
905
+ "liquidising": "liquidizing",
906
+ "litre": "liter",
907
+ "litres": "liters",
908
+ "localise": "localize",
909
+ "localised": "localized",
910
+ "localises": "localizes",
911
+ "localising": "localizing",
912
+ "louvre": "louver",
913
+ "louvred": "louvered",
914
+ "louvres": "louvers",
915
+ "lustre": "luster",
916
+ "magnetise": "magnetize",
917
+ "magnetised": "magnetized",
918
+ "magnetises": "magnetizes",
919
+ "magnetising": "magnetizing",
920
+ "manoeuvrability": "maneuverability",
921
+ "manoeuvrable": "maneuverable",
922
+ "manoeuvre": "maneuver",
923
+ "manoeuvred": "maneuvered",
924
+ "manoeuvres": "maneuvers",
925
+ "manoeuvring": "maneuvering",
926
+ "manoeuvrings": "maneuverings",
927
+ "marginalisation": "marginalization",
928
+ "marginalise": "marginalize",
929
+ "marginalised": "marginalized",
930
+ "marginalises": "marginalizes",
931
+ "marginalising": "marginalizing",
932
+ "marshalled": "marshaled",
933
+ "marshalling": "marshaling",
934
+ "marvelled": "marveled",
935
+ "marvelling": "marveling",
936
+ "marvellous": "marvelous",
937
+ "marvellously": "marvelously",
938
+ "materialisation": "materialization",
939
+ "materialise": "materialize",
940
+ "materialised": "materialized",
941
+ "materialises": "materializes",
942
+ "materialising": "materializing",
943
+ "maximisation": "maximization",
944
+ "maximise": "maximize",
945
+ "maximised": "maximized",
946
+ "maximises": "maximizes",
947
+ "maximising": "maximizing",
948
+ "meagre": "meager",
949
+ "mechanisation": "mechanization",
950
+ "mechanise": "mechanize",
951
+ "mechanised": "mechanized",
952
+ "mechanises": "mechanizes",
953
+ "mechanising": "mechanizing",
954
+ "mediaeval": "medieval",
955
+ "memorialise": "memorialize",
956
+ "memorialised": "memorialized",
957
+ "memorialises": "memorializes",
958
+ "memorialising": "memorializing",
959
+ "memorise": "memorize",
960
+ "memorised": "memorized",
961
+ "memorises": "memorizes",
962
+ "memorising": "memorizing",
963
+ "mesmerise": "mesmerize",
964
+ "mesmerised": "mesmerized",
965
+ "mesmerises": "mesmerizes",
966
+ "mesmerising": "mesmerizing",
967
+ "metabolise": "metabolize",
968
+ "metabolised": "metabolized",
969
+ "metabolises": "metabolizes",
970
+ "metabolising": "metabolizing",
971
+ "metre": "meter",
972
+ "metres": "meters",
973
+ "mhm": "hmm",
974
+ "micrometre": "micrometer",
975
+ "micrometres": "micrometers",
976
+ "militarise": "militarize",
977
+ "militarised": "militarized",
978
+ "militarises": "militarizes",
979
+ "militarising": "militarizing",
980
+ "milligramme": "milligram",
981
+ "milligrammes": "milligrams",
982
+ "millilitre": "milliliter",
983
+ "millilitres": "milliliters",
984
+ "millimetre": "millimeter",
985
+ "millimetres": "millimeters",
986
+ "miniaturisation": "miniaturization",
987
+ "miniaturise": "miniaturize",
988
+ "miniaturised": "miniaturized",
989
+ "miniaturises": "miniaturizes",
990
+ "miniaturising": "miniaturizing",
991
+ "minibusses": "minibuses",
992
+ "minimise": "minimize",
993
+ "minimised": "minimized",
994
+ "minimises": "minimizes",
995
+ "minimising": "minimizing",
996
+ "misbehaviour": "misbehavior",
997
+ "misdemeanour": "misdemeanor",
998
+ "misdemeanours": "misdemeanors",
999
+ "misspelt": "misspelled",
1000
+ "mitre": "miter",
1001
+ "mitres": "miters",
1002
+ "mm": "hmm",
1003
+ "mmm": "hmm",
1004
+ "mobilisation": "mobilization",
1005
+ "mobilise": "mobilize",
1006
+ "mobilised": "mobilized",
1007
+ "mobilises": "mobilizes",
1008
+ "mobilising": "mobilizing",
1009
+ "modelled": "modeled",
1010
+ "modeller": "modeler",
1011
+ "modellers": "modelers",
1012
+ "modelling": "modeling",
1013
+ "modernise": "modernize",
1014
+ "modernised": "modernized",
1015
+ "modernises": "modernizes",
1016
+ "modernising": "modernizing",
1017
+ "moisturise": "moisturize",
1018
+ "moisturised": "moisturized",
1019
+ "moisturiser": "moisturizer",
1020
+ "moisturisers": "moisturizers",
1021
+ "moisturises": "moisturizes",
1022
+ "moisturising": "moisturizing",
1023
+ "monologue": "monolog",
1024
+ "monologues": "monologs",
1025
+ "monopolisation": "monopolization",
1026
+ "monopolise": "monopolize",
1027
+ "monopolised": "monopolized",
1028
+ "monopolises": "monopolizes",
1029
+ "monopolising": "monopolizing",
1030
+ "moralise": "moralize",
1031
+ "moralised": "moralized",
1032
+ "moralises": "moralizes",
1033
+ "moralising": "moralizing",
1034
+ "motorised": "motorized",
1035
+ "mould": "mold",
1036
+ "moulded": "molded",
1037
+ "moulder": "molder",
1038
+ "mouldered": "moldered",
1039
+ "mouldering": "moldering",
1040
+ "moulders": "molders",
1041
+ "mouldier": "moldier",
1042
+ "mouldiest": "moldiest",
1043
+ "moulding": "molding",
1044
+ "mouldings": "moldings",
1045
+ "moulds": "molds",
1046
+ "mouldy": "moldy",
1047
+ "moult": "molt",
1048
+ "moulted": "molted",
1049
+ "moulting": "molting",
1050
+ "moults": "molts",
1051
+ "moustache": "mustache",
1052
+ "moustached": "mustached",
1053
+ "moustaches": "mustaches",
1054
+ "moustachioed": "mustachioed",
1055
+ "multicoloured": "multicolored",
1056
+ "nationalisation": "nationalization",
1057
+ "nationalisations": "nationalizations",
1058
+ "nationalise": "nationalize",
1059
+ "nationalised": "nationalized",
1060
+ "nationalises": "nationalizes",
1061
+ "nationalising": "nationalizing",
1062
+ "naturalisation": "naturalization",
1063
+ "naturalise": "naturalize",
1064
+ "naturalised": "naturalized",
1065
+ "naturalises": "naturalizes",
1066
+ "naturalising": "naturalizing",
1067
+ "neighbour": "neighbor",
1068
+ "neighbourhood": "neighborhood",
1069
+ "neighbourhoods": "neighborhoods",
1070
+ "neighbouring": "neighboring",
1071
+ "neighbourliness": "neighborliness",
1072
+ "neighbourly": "neighborly",
1073
+ "neighbours": "neighbors",
1074
+ "neutralisation": "neutralization",
1075
+ "neutralise": "neutralize",
1076
+ "neutralised": "neutralized",
1077
+ "neutralises": "neutralizes",
1078
+ "neutralising": "neutralizing",
1079
+ "normalisation": "normalization",
1080
+ "normalise": "normalize",
1081
+ "normalised": "normalized",
1082
+ "normalises": "normalizes",
1083
+ "normalising": "normalizing",
1084
+ "odour": "odor",
1085
+ "odourless": "odorless",
1086
+ "odours": "odors",
1087
+ "oesophagus": "esophagus",
1088
+ "oesophaguses": "esophaguses",
1089
+ "oestrogen": "estrogen",
1090
+ "offence": "offense",
1091
+ "offences": "offenses",
1092
+ "omelette": "omelet",
1093
+ "omelettes": "omelets",
1094
+ "optimise": "optimize",
1095
+ "optimised": "optimized",
1096
+ "optimises": "optimizes",
1097
+ "optimising": "optimizing",
1098
+ "organisation": "organization",
1099
+ "organisational": "organizational",
1100
+ "organisations": "organizations",
1101
+ "organise": "organize",
1102
+ "organised": "organized",
1103
+ "organiser": "organizer",
1104
+ "organisers": "organizers",
1105
+ "organises": "organizes",
1106
+ "organising": "organizing",
1107
+ "orthopaedic": "orthopedic",
1108
+ "orthopaedics": "orthopedics",
1109
+ "ostracise": "ostracize",
1110
+ "ostracised": "ostracized",
1111
+ "ostracises": "ostracizes",
1112
+ "ostracising": "ostracizing",
1113
+ "outmanoeuvre": "outmaneuver",
1114
+ "outmanoeuvred": "outmaneuvered",
1115
+ "outmanoeuvres": "outmaneuvers",
1116
+ "outmanoeuvring": "outmaneuvering",
1117
+ "overemphasise": "overemphasize",
1118
+ "overemphasised": "overemphasized",
1119
+ "overemphasises": "overemphasizes",
1120
+ "overemphasising": "overemphasizing",
1121
+ "oxidisation": "oxidization",
1122
+ "oxidise": "oxidize",
1123
+ "oxidised": "oxidized",
1124
+ "oxidises": "oxidizes",
1125
+ "oxidising": "oxidizing",
1126
+ "paederast": "pederast",
1127
+ "paederasts": "pederasts",
1128
+ "paediatric": "pediatric",
1129
+ "paediatrician": "pediatrician",
1130
+ "paediatricians": "pediatricians",
1131
+ "paediatrics": "pediatrics",
1132
+ "paedophile": "pedophile",
1133
+ "paedophiles": "pedophiles",
1134
+ "paedophilia": "pedophilia",
1135
+ "palaeolithic": "paleolithic",
1136
+ "palaeontologist": "paleontologist",
1137
+ "palaeontologists": "paleontologists",
1138
+ "palaeontology": "paleontology",
1139
+ "panelled": "paneled",
1140
+ "panelling": "paneling",
1141
+ "panellist": "panelist",
1142
+ "panellists": "panelists",
1143
+ "paralyse": "paralyze",
1144
+ "paralysed": "paralyzed",
1145
+ "paralyses": "paralyzes",
1146
+ "paralysing": "paralyzing",
1147
+ "parcelled": "parceled",
1148
+ "parcelling": "parceling",
1149
+ "parlour": "parlor",
1150
+ "parlours": "parlors",
1151
+ "particularise": "particularize",
1152
+ "particularised": "particularized",
1153
+ "particularises": "particularizes",
1154
+ "particularising": "particularizing",
1155
+ "passivisation": "passivization",
1156
+ "passivise": "passivize",
1157
+ "passivised": "passivized",
1158
+ "passivises": "passivizes",
1159
+ "passivising": "passivizing",
1160
+ "pasteurisation": "pasteurization",
1161
+ "pasteurise": "pasteurize",
1162
+ "pasteurised": "pasteurized",
1163
+ "pasteurises": "pasteurizes",
1164
+ "pasteurising": "pasteurizing",
1165
+ "patronise": "patronize",
1166
+ "patronised": "patronized",
1167
+ "patronises": "patronizes",
1168
+ "patronising": "patronizing",
1169
+ "patronisingly": "patronizingly",
1170
+ "pedalled": "pedaled",
1171
+ "pedalling": "pedaling",
1172
+ "pedestrianisation": "pedestrianization",
1173
+ "pedestrianise": "pedestrianize",
1174
+ "pedestrianised": "pedestrianized",
1175
+ "pedestrianises": "pedestrianizes",
1176
+ "pedestrianising": "pedestrianizing",
1177
+ "penalise": "penalize",
1178
+ "penalised": "penalized",
1179
+ "penalises": "penalizes",
1180
+ "penalising": "penalizing",
1181
+ "pencilled": "penciled",
1182
+ "pencilling": "penciling",
1183
+ "personalise": "personalize",
1184
+ "personalised": "personalized",
1185
+ "personalises": "personalizes",
1186
+ "personalising": "personalizing",
1187
+ "pharmacopoeia": "pharmacopeia",
1188
+ "pharmacopoeias": "pharmacopeias",
1189
+ "philosophise": "philosophize",
1190
+ "philosophised": "philosophized",
1191
+ "philosophises": "philosophizes",
1192
+ "philosophising": "philosophizing",
1193
+ "philtre": "filter",
1194
+ "philtres": "filters",
1195
+ "phoney": "phony",
1196
+ "plagiarise": "plagiarize",
1197
+ "plagiarised": "plagiarized",
1198
+ "plagiarises": "plagiarizes",
1199
+ "plagiarising": "plagiarizing",
1200
+ "plough": "plow",
1201
+ "ploughed": "plowed",
1202
+ "ploughing": "plowing",
1203
+ "ploughman": "plowman",
1204
+ "ploughmen": "plowmen",
1205
+ "ploughs": "plows",
1206
+ "ploughshare": "plowshare",
1207
+ "ploughshares": "plowshares",
1208
+ "polarisation": "polarization",
1209
+ "polarise": "polarize",
1210
+ "polarised": "polarized",
1211
+ "polarises": "polarizes",
1212
+ "polarising": "polarizing",
1213
+ "politicisation": "politicization",
1214
+ "politicise": "politicize",
1215
+ "politicised": "politicized",
1216
+ "politicises": "politicizes",
1217
+ "politicising": "politicizing",
1218
+ "popularisation": "popularization",
1219
+ "popularise": "popularize",
1220
+ "popularised": "popularized",
1221
+ "popularises": "popularizes",
1222
+ "popularising": "popularizing",
1223
+ "pouffe": "pouf",
1224
+ "pouffes": "poufs",
1225
+ "practise": "practice",
1226
+ "practised": "practiced",
1227
+ "practises": "practices",
1228
+ "practising": "practicing",
1229
+ "praesidium": "presidium",
1230
+ "praesidiums": "presidiums",
1231
+ "pressurisation": "pressurization",
1232
+ "pressurise": "pressurize",
1233
+ "pressurised": "pressurized",
1234
+ "pressurises": "pressurizes",
1235
+ "pressurising": "pressurizing",
1236
+ "pretence": "pretense",
1237
+ "pretences": "pretenses",
1238
+ "primaeval": "primeval",
1239
+ "prioritisation": "prioritization",
1240
+ "prioritise": "prioritize",
1241
+ "prioritised": "prioritized",
1242
+ "prioritises": "prioritizes",
1243
+ "prioritising": "prioritizing",
1244
+ "privatisation": "privatization",
1245
+ "privatisations": "privatizations",
1246
+ "privatise": "privatize",
1247
+ "privatised": "privatized",
1248
+ "privatises": "privatizes",
1249
+ "privatising": "privatizing",
1250
+ "professionalisation": "professionalization",
1251
+ "professionalise": "professionalize",
1252
+ "professionalised": "professionalized",
1253
+ "professionalises": "professionalizes",
1254
+ "professionalising": "professionalizing",
1255
+ "programme": "program",
1256
+ "programmes": "programs",
1257
+ "prologue": "prolog",
1258
+ "prologues": "prologs",
1259
+ "propagandise": "propagandize",
1260
+ "propagandised": "propagandized",
1261
+ "propagandises": "propagandizes",
1262
+ "propagandising": "propagandizing",
1263
+ "proselytise": "proselytize",
1264
+ "proselytised": "proselytized",
1265
+ "proselytiser": "proselytizer",
1266
+ "proselytisers": "proselytizers",
1267
+ "proselytises": "proselytizes",
1268
+ "proselytising": "proselytizing",
1269
+ "psychoanalyse": "psychoanalyze",
1270
+ "psychoanalysed": "psychoanalyzed",
1271
+ "psychoanalyses": "psychoanalyzes",
1272
+ "psychoanalysing": "psychoanalyzing",
1273
+ "publicise": "publicize",
1274
+ "publicised": "publicized",
1275
+ "publicises": "publicizes",
1276
+ "publicising": "publicizing",
1277
+ "pulverisation": "pulverization",
1278
+ "pulverise": "pulverize",
1279
+ "pulverised": "pulverized",
1280
+ "pulverises": "pulverizes",
1281
+ "pulverising": "pulverizing",
1282
+ "pummelled": "pummel",
1283
+ "pummelling": "pummeled",
1284
+ "pyjama": "pajama",
1285
+ "pyjamas": "pajamas",
1286
+ "pzazz": "pizzazz",
1287
+ "quarrelled": "quarreled",
1288
+ "quarrelling": "quarreling",
1289
+ "radicalise": "radicalize",
1290
+ "radicalised": "radicalized",
1291
+ "radicalises": "radicalizes",
1292
+ "radicalising": "radicalizing",
1293
+ "rancour": "rancor",
1294
+ "randomise": "randomize",
1295
+ "randomised": "randomized",
1296
+ "randomises": "randomizes",
1297
+ "randomising": "randomizing",
1298
+ "rationalisation": "rationalization",
1299
+ "rationalisations": "rationalizations",
1300
+ "rationalise": "rationalize",
1301
+ "rationalised": "rationalized",
1302
+ "rationalises": "rationalizes",
1303
+ "rationalising": "rationalizing",
1304
+ "ravelled": "raveled",
1305
+ "ravelling": "raveling",
1306
+ "realisable": "realizable",
1307
+ "realisation": "realization",
1308
+ "realisations": "realizations",
1309
+ "realise": "realize",
1310
+ "realised": "realized",
1311
+ "realises": "realizes",
1312
+ "realising": "realizing",
1313
+ "recognisable": "recognizable",
1314
+ "recognisably": "recognizably",
1315
+ "recognisance": "recognizance",
1316
+ "recognise": "recognize",
1317
+ "recognised": "recognized",
1318
+ "recognises": "recognizes",
1319
+ "recognising": "recognizing",
1320
+ "reconnoitre": "reconnoiter",
1321
+ "reconnoitred": "reconnoitered",
1322
+ "reconnoitres": "reconnoiters",
1323
+ "reconnoitring": "reconnoitering",
1324
+ "refuelled": "refueled",
1325
+ "refuelling": "refueling",
1326
+ "regularisation": "regularization",
1327
+ "regularise": "regularize",
1328
+ "regularised": "regularized",
1329
+ "regularises": "regularizes",
1330
+ "regularising": "regularizing",
1331
+ "remodelled": "remodeled",
1332
+ "remodelling": "remodeling",
1333
+ "remould": "remold",
1334
+ "remoulded": "remolded",
1335
+ "remoulding": "remolding",
1336
+ "remoulds": "remolds",
1337
+ "reorganisation": "reorganization",
1338
+ "reorganisations": "reorganizations",
1339
+ "reorganise": "reorganize",
1340
+ "reorganised": "reorganized",
1341
+ "reorganises": "reorganizes",
1342
+ "reorganising": "reorganizing",
1343
+ "revelled": "reveled",
1344
+ "reveller": "reveler",
1345
+ "revellers": "revelers",
1346
+ "revelling": "reveling",
1347
+ "revitalise": "revitalize",
1348
+ "revitalised": "revitalized",
1349
+ "revitalises": "revitalizes",
1350
+ "revitalising": "revitalizing",
1351
+ "revolutionise": "revolutionize",
1352
+ "revolutionised": "revolutionized",
1353
+ "revolutionises": "revolutionizes",
1354
+ "revolutionising": "revolutionizing",
1355
+ "rhapsodise": "rhapsodize",
1356
+ "rhapsodised": "rhapsodized",
1357
+ "rhapsodises": "rhapsodizes",
1358
+ "rhapsodising": "rhapsodizing",
1359
+ "rigour": "rigor",
1360
+ "rigours": "rigors",
1361
+ "ritualised": "ritualized",
1362
+ "rivalled": "rivaled",
1363
+ "rivalling": "rivaling",
1364
+ "romanticise": "romanticize",
1365
+ "romanticised": "romanticized",
1366
+ "romanticises": "romanticizes",
1367
+ "romanticising": "romanticizing",
1368
+ "rumour": "rumor",
1369
+ "rumoured": "rumored",
1370
+ "rumours": "rumors",
1371
+ "sabre": "saber",
1372
+ "sabres": "sabers",
1373
+ "saltpetre": "saltpeter",
1374
+ "sanitise": "sanitize",
1375
+ "sanitised": "sanitized",
1376
+ "sanitises": "sanitizes",
1377
+ "sanitising": "sanitizing",
1378
+ "satirise": "satirize",
1379
+ "satirised": "satirized",
1380
+ "satirises": "satirizes",
1381
+ "satirising": "satirizing",
1382
+ "saviour": "savior",
1383
+ "saviours": "saviors",
1384
+ "savour": "savor",
1385
+ "savoured": "savored",
1386
+ "savouries": "savories",
1387
+ "savouring": "savoring",
1388
+ "savours": "savors",
1389
+ "savoury": "savory",
1390
+ "scandalise": "scandalize",
1391
+ "scandalised": "scandalized",
1392
+ "scandalises": "scandalizes",
1393
+ "scandalising": "scandalizing",
1394
+ "sceptic": "skeptic",
1395
+ "sceptical": "skeptical",
1396
+ "sceptically": "skeptically",
1397
+ "scepticism": "skepticism",
1398
+ "sceptics": "skeptics",
1399
+ "sceptre": "scepter",
1400
+ "sceptres": "scepters",
1401
+ "scrutinise": "scrutinize",
1402
+ "scrutinised": "scrutinized",
1403
+ "scrutinises": "scrutinizes",
1404
+ "scrutinising": "scrutinizing",
1405
+ "secularisation": "secularization",
1406
+ "secularise": "secularize",
1407
+ "secularised": "secularized",
1408
+ "secularises": "secularizes",
1409
+ "secularising": "secularizing",
1410
+ "sensationalise": "sensationalize",
1411
+ "sensationalised": "sensationalized",
1412
+ "sensationalises": "sensationalizes",
1413
+ "sensationalising": "sensationalizing",
1414
+ "sensitise": "sensitize",
1415
+ "sensitised": "sensitized",
1416
+ "sensitises": "sensitizes",
1417
+ "sensitising": "sensitizing",
1418
+ "sentimentalise": "sentimentalize",
1419
+ "sentimentalised": "sentimentalized",
1420
+ "sentimentalises": "sentimentalizes",
1421
+ "sentimentalising": "sentimentalizing",
1422
+ "sepulchre": "sepulcher",
1423
+ "sepulchres": "sepulchers",
1424
+ "serialisation": "serialization",
1425
+ "serialisations": "serializations",
1426
+ "serialise": "serialize",
1427
+ "serialised": "serialized",
1428
+ "serialises": "serializes",
1429
+ "serialising": "serializing",
1430
+ "sermonise": "sermonize",
1431
+ "sermonised": "sermonized",
1432
+ "sermonises": "sermonizes",
1433
+ "sermonising": "sermonizing",
1434
+ "sheikh": "sheik",
1435
+ "shovelled": "shoveled",
1436
+ "shovelling": "shoveling",
1437
+ "shrivelled": "shriveled",
1438
+ "shrivelling": "shriveling",
1439
+ "signalise": "signalize",
1440
+ "signalised": "signalized",
1441
+ "signalises": "signalizes",
1442
+ "signalising": "signalizing",
1443
+ "signalled": "signaled",
1444
+ "signalling": "signaling",
1445
+ "smoulder": "smolder",
1446
+ "smouldered": "smoldered",
1447
+ "smouldering": "smoldering",
1448
+ "smoulders": "smolders",
1449
+ "snivelled": "sniveled",
1450
+ "snivelling": "sniveling",
1451
+ "snorkelled": "snorkeled",
1452
+ "snorkelling": "snorkeling",
1453
+ "snowplough": "snowplow",
1454
+ "snowploughs": "snowplow",
1455
+ "socialisation": "socialization",
1456
+ "socialise": "socialize",
1457
+ "socialised": "socialized",
1458
+ "socialises": "socializes",
1459
+ "socialising": "socializing",
1460
+ "sodomise": "sodomize",
1461
+ "sodomised": "sodomized",
1462
+ "sodomises": "sodomizes",
1463
+ "sodomising": "sodomizing",
1464
+ "solemnise": "solemnize",
1465
+ "solemnised": "solemnized",
1466
+ "solemnises": "solemnizes",
1467
+ "solemnising": "solemnizing",
1468
+ "sombre": "somber",
1469
+ "specialisation": "specialization",
1470
+ "specialisations": "specializations",
1471
+ "specialise": "specialize",
1472
+ "specialised": "specialized",
1473
+ "specialises": "specializes",
1474
+ "specialising": "specializing",
1475
+ "spectre": "specter",
1476
+ "spectres": "specters",
1477
+ "spiralled": "spiraled",
1478
+ "spiralling": "spiraling",
1479
+ "splendour": "splendor",
1480
+ "splendours": "splendors",
1481
+ "squirrelled": "squirreled",
1482
+ "squirrelling": "squirreling",
1483
+ "stabilisation": "stabilization",
1484
+ "stabilise": "stabilize",
1485
+ "stabilised": "stabilized",
1486
+ "stabiliser": "stabilizer",
1487
+ "stabilisers": "stabilizers",
1488
+ "stabilises": "stabilizes",
1489
+ "stabilising": "stabilizing",
1490
+ "standardisation": "standardization",
1491
+ "standardise": "standardize",
1492
+ "standardised": "standardized",
1493
+ "standardises": "standardizes",
1494
+ "standardising": "standardizing",
1495
+ "stencilled": "stenciled",
1496
+ "stencilling": "stenciling",
1497
+ "sterilisation": "sterilization",
1498
+ "sterilisations": "sterilizations",
1499
+ "sterilise": "sterilize",
1500
+ "sterilised": "sterilized",
1501
+ "steriliser": "sterilizer",
1502
+ "sterilisers": "sterilizers",
1503
+ "sterilises": "sterilizes",
1504
+ "sterilising": "sterilizing",
1505
+ "stigmatisation": "stigmatization",
1506
+ "stigmatise": "stigmatize",
1507
+ "stigmatised": "stigmatized",
1508
+ "stigmatises": "stigmatizes",
1509
+ "stigmatising": "stigmatizing",
1510
+ "storey": "story",
1511
+ "storeys": "stories",
1512
+ "subsidisation": "subsidization",
1513
+ "subsidise": "subsidize",
1514
+ "subsidised": "subsidized",
1515
+ "subsidiser": "subsidizer",
1516
+ "subsidisers": "subsidizers",
1517
+ "subsidises": "subsidizes",
1518
+ "subsidising": "subsidizing",
1519
+ "succour": "succor",
1520
+ "succoured": "succored",
1521
+ "succouring": "succoring",
1522
+ "succours": "succors",
1523
+ "sulphate": "sulfate",
1524
+ "sulphates": "sulfates",
1525
+ "sulphide": "sulfide",
1526
+ "sulphides": "sulfides",
1527
+ "sulphur": "sulfur",
1528
+ "sulphurous": "sulfurous",
1529
+ "summarise": "summarize",
1530
+ "summarised": "summarized",
1531
+ "summarises": "summarizes",
1532
+ "summarising": "summarizing",
1533
+ "swivelled": "swiveled",
1534
+ "swivelling": "swiveling",
1535
+ "symbolise": "symbolize",
1536
+ "symbolised": "symbolized",
1537
+ "symbolises": "symbolizes",
1538
+ "symbolising": "symbolizing",
1539
+ "sympathise": "sympathize",
1540
+ "sympathised": "sympathized",
1541
+ "sympathiser": "sympathizer",
1542
+ "sympathisers": "sympathizers",
1543
+ "sympathises": "sympathizes",
1544
+ "sympathising": "sympathizing",
1545
+ "synchronisation": "synchronization",
1546
+ "synchronise": "synchronize",
1547
+ "synchronised": "synchronized",
1548
+ "synchronises": "synchronizes",
1549
+ "synchronising": "synchronizing",
1550
+ "synthesise": "synthesize",
1551
+ "synthesised": "synthesized",
1552
+ "synthesiser": "synthesizer",
1553
+ "synthesisers": "synthesizers",
1554
+ "synthesises": "synthesizes",
1555
+ "synthesising": "synthesizing",
1556
+ "syphon": "siphon",
1557
+ "syphoned": "siphoned",
1558
+ "syphoning": "siphoning",
1559
+ "syphons": "siphons",
1560
+ "systematisation": "systematization",
1561
+ "systematise": "systematize",
1562
+ "systematised": "systematized",
1563
+ "systematises": "systematizes",
1564
+ "systematising": "systematizing",
1565
+ "tantalise": "tantalize",
1566
+ "tantalised": "tantalized",
1567
+ "tantalises": "tantalizes",
1568
+ "tantalising": "tantalizing",
1569
+ "tantalisingly": "tantalizingly",
1570
+ "tasselled": "tasseled",
1571
+ "technicolour": "technicolor",
1572
+ "temporise": "temporize",
1573
+ "temporised": "temporized",
1574
+ "temporises": "temporizes",
1575
+ "temporising": "temporizing",
1576
+ "tenderise": "tenderize",
1577
+ "tenderised": "tenderized",
1578
+ "tenderises": "tenderizes",
1579
+ "tenderising": "tenderizing",
1580
+ "terrorise": "terrorize",
1581
+ "terrorised": "terrorized",
1582
+ "terrorises": "terrorizes",
1583
+ "terrorising": "terrorizing",
1584
+ "theatre": "theater",
1585
+ "theatregoer": "theatergoer",
1586
+ "theatregoers": "theatergoers",
1587
+ "theatres": "theaters",
1588
+ "theorise": "theorize",
1589
+ "theorised": "theorized",
1590
+ "theorises": "theorizes",
1591
+ "theorising": "theorizing",
1592
+ "tonne": "ton",
1593
+ "tonnes": "tons",
1594
+ "towelled": "toweled",
1595
+ "towelling": "toweling",
1596
+ "toxaemia": "toxemia",
1597
+ "tranquillise": "tranquilize",
1598
+ "tranquillised": "tranquilized",
1599
+ "tranquilliser": "tranquilizer",
1600
+ "tranquillisers": "tranquilizers",
1601
+ "tranquillises": "tranquilizes",
1602
+ "tranquillising": "tranquilizing",
1603
+ "tranquillity": "tranquility",
1604
+ "tranquillize": "tranquilize",
1605
+ "tranquillized": "tranquilized",
1606
+ "tranquillizer": "tranquilizer",
1607
+ "tranquillizers": "tranquilizers",
1608
+ "tranquillizes": "tranquilizes",
1609
+ "tranquillizing": "tranquilizing",
1610
+ "tranquilly": "tranquility",
1611
+ "transistorised": "transistorized",
1612
+ "traumatise": "traumatize",
1613
+ "traumatised": "traumatized",
1614
+ "traumatises": "traumatizes",
1615
+ "traumatising": "traumatizing",
1616
+ "travelled": "traveled",
1617
+ "traveller": "traveler",
1618
+ "travellers": "travelers",
1619
+ "travelling": "traveling",
1620
+ "travelog": "travelogue",
1621
+ "travelogs": "travelogues",
1622
+ "trialled": "trialed",
1623
+ "trialling": "trialing",
1624
+ "tricolour": "tricolor",
1625
+ "tricolours": "tricolors",
1626
+ "trivialise": "trivialize",
1627
+ "trivialised": "trivialized",
1628
+ "trivialises": "trivializes",
1629
+ "trivialising": "trivializing",
1630
+ "tumour": "tumor",
1631
+ "tumours": "tumors",
1632
+ "tunnelled": "tunneled",
1633
+ "tunnelling": "tunneling",
1634
+ "tyrannise": "tyrannize",
1635
+ "tyrannised": "tyrannized",
1636
+ "tyrannises": "tyrannizes",
1637
+ "tyrannising": "tyrannizing",
1638
+ "tyre": "tire",
1639
+ "tyres": "tires",
1640
+ "unauthorised": "unauthorized",
1641
+ "uncivilised": "uncivilized",
1642
+ "underutilised": "underutilized",
1643
+ "unequalled": "unequaled",
1644
+ "unfavourable": "unfavorable",
1645
+ "unfavourably": "unfavorably",
1646
+ "unionisation": "unionization",
1647
+ "unionise": "unionize",
1648
+ "unionised": "unionized",
1649
+ "unionises": "unionizes",
1650
+ "unionising": "unionizing",
1651
+ "unorganised": "unorganized",
1652
+ "unravelled": "unraveled",
1653
+ "unravelling": "unraveling",
1654
+ "unrecognisable": "unrecognizable",
1655
+ "unrecognised": "unrecognized",
1656
+ "unrivalled": "unrivaled",
1657
+ "unsavoury": "unsavory",
1658
+ "untrammelled": "untrammeled",
1659
+ "urbanisation": "urbanization",
1660
+ "urbanise": "urbanize",
1661
+ "urbanised": "urbanized",
1662
+ "urbanises": "urbanizes",
1663
+ "urbanising": "urbanizing",
1664
+ "utilisable": "utilizable",
1665
+ "utilisation": "utilization",
1666
+ "utilise": "utilize",
1667
+ "utilised": "utilized",
1668
+ "utilises": "utilizes",
1669
+ "utilising": "utilizing",
1670
+ "valour": "valor",
1671
+ "vandalise": "vandalize",
1672
+ "vandalised": "vandalized",
1673
+ "vandalises": "vandalizes",
1674
+ "vandalising": "vandalizing",
1675
+ "vaporisation": "vaporization",
1676
+ "vaporise": "vaporize",
1677
+ "vaporised": "vaporized",
1678
+ "vaporises": "vaporizes",
1679
+ "vaporising": "vaporizing",
1680
+ "vapour": "vapor",
1681
+ "vapours": "vapors",
1682
+ "verbalise": "verbalize",
1683
+ "verbalised": "verbalized",
1684
+ "verbalises": "verbalizes",
1685
+ "verbalising": "verbalizing",
1686
+ "victimisation": "victimization",
1687
+ "victimise": "victimize",
1688
+ "victimised": "victimized",
1689
+ "victimises": "victimizes",
1690
+ "victimising": "victimizing",
1691
+ "videodisc": "videodisk",
1692
+ "videodiscs": "videodisks",
1693
+ "vigour": "vigor",
1694
+ "visualisation": "visualization",
1695
+ "visualisations": "visualizations",
1696
+ "visualise": "visualize",
1697
+ "visualised": "visualized",
1698
+ "visualises": "visualizes",
1699
+ "visualising": "visualizing",
1700
+ "vocalisation": "vocalization",
1701
+ "vocalisations": "vocalizations",
1702
+ "vocalise": "vocalize",
1703
+ "vocalised": "vocalized",
1704
+ "vocalises": "vocalizes",
1705
+ "vocalising": "vocalizing",
1706
+ "vulcanised": "vulcanized",
1707
+ "vulgarisation": "vulgarization",
1708
+ "vulgarise": "vulgarize",
1709
+ "vulgarised": "vulgarized",
1710
+ "vulgarises": "vulgarizes",
1711
+ "vulgarising": "vulgarizing",
1712
+ "waggon": "wagon",
1713
+ "waggons": "wagons",
1714
+ "watercolour": "watercolor",
1715
+ "watercolours": "watercolors",
1716
+ "weaselled": "weaseled",
1717
+ "weaselling": "weaseling",
1718
+ "westernisation": "westernization",
1719
+ "westernise": "westernize",
1720
+ "westernised": "westernized",
1721
+ "westernises": "westernizes",
1722
+ "westernising": "westernizing",
1723
+ "womanise": "womanize",
1724
+ "womanised": "womanized",
1725
+ "womaniser": "womanizer",
1726
+ "womanisers": "womanizers",
1727
+ "womanises": "womanizes",
1728
+ "womanising": "womanizing",
1729
+ "woollen": "woolen",
1730
+ "woollens": "woolens",
1731
+ "woollies": "woolies",
1732
+ "woolly": "wooly",
1733
+ "worshipped": "worshiped",
1734
+ "worshipper": "worshiper",
1735
+ "worshipping": "worshiping",
1736
+ "yodelled": "yodeled",
1737
+ "yodelling": "yodeling",
1738
+ "yoghourt": "yogurt",
1739
+ "yoghourts": "yogurts",
1740
+ "yoghurt": "yogurt",
1741
+ "yoghurts": "yogurts"
1742
+ }
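The mapping that closes above is a British→American spelling table, presumably the English text normalizer added alongside the tokenizer files and used when the run below passes --do_normalize_eval (references and predictions normalized before WER). As a minimal, illustrative sketch only (not the training script's own evaluation code), such a map can be applied word by word; the file name normalizer.json is an assumption:

import json

# Illustrative only: load a British->American spelling map like the one above
# and apply it word by word. "normalizer.json" is an assumed local copy of the
# mapping, not necessarily the file name used by the training run.
with open("normalizer.json") as f:
    spelling_map = json.load(f)

def normalize_spelling(text: str) -> str:
    # Lower-case, split on whitespace, and swap in the American spelling where
    # a mapping exists; unknown words pass through unchanged.
    return " ".join(spelling_map.get(w, w) for w in text.lower().split())

print(normalize_spelling("They realised the theatre was rumoured to be sombre"))
# -> "they realized the theater was rumored to be somber"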
preprocessor_config.json ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:154295da2b680e283731469d66fb3552823d07524d02e1453e1606abef5b5318
3
+ size 483536061
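The three lines above are only the Git LFS pointer for the weights file (oid sha256 and size), not the weights themselves. As a quick integrity check after downloading the real pytorch_model.bin, the hash and size can be recomputed and compared against the pointer; this sketch assumes the file sits in the current directory.

import hashlib
import os

# Illustrative check of the Git LFS pointer recorded above: recompute the
# sha256 and size of a locally downloaded pytorch_model.bin and compare.
EXPECTED_OID = "154295da2b680e283731469d66fb3552823d07524d02e1453e1606abef5b5318"
EXPECTED_SIZE = 483536061  # bytes, as recorded in the pointer

path = "pytorch_model.bin"  # assumed local path to the downloaded weights
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")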
run.log ADDED
@@ -0,0 +1,677 @@
1
+ [2022-12-18 08:40:52,091] [WARNING] [runner.py:179:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
2
+ [2022-12-18 08:40:52,100] [INFO] [runner.py:508:main] cmd = /usr/bin/python -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMF19 --master_addr=127.0.0.1 --master_port=29500 run_speech_recognition_seq2seq_streaming.py --deepspeed=ds_config.json --model_name_or_path=openai/whisper-small --dataset_name=mozilla-foundation/common_voice_11_0 --dataset_config_name=ro --language=romanian --train_split_name=train+validation --eval_split_name=test --model_index_name=Whisper Small Romanian CV11 --max_steps=5000 --output_dir=./ --per_device_train_batch_size=64 --per_device_eval_batch_size=32 --logging_steps=25 --learning_rate=1e-5 --warmup_steps=500 --evaluation_strategy=steps --eval_steps=1000 --save_strategy=steps --save_steps=1000 --generation_max_length=225 --length_column_name=input_length --max_duration_in_seconds=30 --text_column_name=sentence --freeze_feature_encoder=False --report_to=tensorboard --metric_for_best_model=wer --greater_is_better=False --load_best_model_at_end --gradient_checkpointing --fp16 --overwrite_output_dir --do_train --do_eval --predict_with_generate --do_normalize_eval --streaming --use_auth_token --push_to_hub
3
+ [2022-12-18 08:40:55,346] [INFO] [launch.py:135:main] 0 NV_LIBNCCL_DEV_PACKAGE=libnccl-dev=2.13.4-1+cuda11.7
4
+ [2022-12-18 08:40:55,346] [INFO] [launch.py:135:main] 0 NV_LIBNCCL_DEV_PACKAGE_VERSION=2.13.4-1
5
+ [2022-12-18 08:40:55,346] [INFO] [launch.py:135:main] 0 NCCL_VERSION=2.13.4-1
6
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:135:main] 0 NV_LIBNCCL_DEV_PACKAGE_NAME=libnccl-dev
7
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:135:main] 0 NV_LIBNCCL_PACKAGE=libnccl2=2.13.4-1+cuda11.7
8
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:135:main] 0 NV_LIBNCCL_PACKAGE_NAME=libnccl2
9
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:135:main] 0 NV_LIBNCCL_PACKAGE_VERSION=2.13.4-1
10
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:142:main] WORLD INFO DICT: {'localhost': [0]}
11
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:148:main] nnodes=1, num_local_procs=1, node_rank=0
12
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:161:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0]})
13
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:162:main] dist_world_size=1
14
+ [2022-12-18 08:40:55,347] [INFO] [launch.py:164:main] Setting CUDA_VISIBLE_DEVICES=0
15
+ [2022-12-18 08:41:04,141] [INFO] [comm.py:654:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
16
+ 12/18/2022 08:41:04 - WARNING - __main__ - Process rank: 0, device: cuda:0, n_gpu: 1distributed training: True, 16-bits training: True
17
+ 12/18/2022 08:41:04 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(
18
+ _n_gpu=1,
19
+ adafactor=False,
20
+ adam_beta1=0.9,
21
+ adam_beta2=0.999,
22
+ adam_epsilon=1e-08,
23
+ auto_find_batch_size=False,
24
+ bf16=False,
25
+ bf16_full_eval=False,
26
+ data_seed=None,
27
+ dataloader_drop_last=False,
28
+ dataloader_num_workers=0,
29
+ dataloader_pin_memory=True,
30
+ ddp_bucket_cap_mb=None,
31
+ ddp_find_unused_parameters=None,
32
+ ddp_timeout=1800,
33
+ debug=[],
34
+ deepspeed=ds_config.json,
35
+ disable_tqdm=False,
36
+ do_eval=True,
37
+ do_predict=False,
38
+ do_train=True,
39
+ eval_accumulation_steps=None,
40
+ eval_delay=0,
41
+ eval_steps=1000,
42
+ evaluation_strategy=steps,
43
+ fp16=True,
44
+ fp16_backend=auto,
45
+ fp16_full_eval=False,
46
+ fp16_opt_level=O1,
47
+ fsdp=[],
48
+ fsdp_min_num_params=0,
49
+ fsdp_transformer_layer_cls_to_wrap=None,
50
+ full_determinism=False,
51
+ generation_max_length=225,
52
+ generation_num_beams=None,
53
+ gradient_accumulation_steps=1,
54
+ gradient_checkpointing=True,
55
+ greater_is_better=False,
56
+ group_by_length=False,
57
+ half_precision_backend=auto,
58
+ hub_model_id=None,
59
+ hub_private_repo=False,
60
+ hub_strategy=every_save,
61
+ hub_token=<HUB_TOKEN>,
62
+ ignore_data_skip=False,
63
+ include_inputs_for_metrics=False,
64
+ jit_mode_eval=False,
65
+ label_names=None,
66
+ label_smoothing_factor=0.0,
67
+ learning_rate=1e-05,
68
+ length_column_name=input_length,
69
+ load_best_model_at_end=True,
70
+ local_rank=0,
71
+ log_level=passive,
72
+ log_level_replica=passive,
73
+ log_on_each_node=True,
74
+ logging_dir=./runs/Dec18_08-41-04_fe2747a042f0,
75
+ logging_first_step=False,
76
+ logging_nan_inf_filter=True,
77
+ logging_steps=25,
78
+ logging_strategy=steps,
79
+ lr_scheduler_type=linear,
80
+ max_grad_norm=1.0,
81
+ max_steps=5000,
82
+ metric_for_best_model=wer,
83
+ mp_parameters=,
84
+ no_cuda=False,
85
+ num_train_epochs=3.0,
86
+ optim=adamw_hf,
87
+ optim_args=None,
88
+ output_dir=./,
89
+ overwrite_output_dir=True,
90
+ past_index=-1,
91
+ per_device_eval_batch_size=32,
92
+ per_device_train_batch_size=64,
93
+ predict_with_generate=True,
94
+ prediction_loss_only=False,
95
+ push_to_hub=True,
96
+ push_to_hub_model_id=None,
97
+ push_to_hub_organization=None,
98
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
99
+ ray_scope=last,
100
+ remove_unused_columns=True,
101
+ report_to=['tensorboard'],
102
+ resume_from_checkpoint=None,
103
+ run_name=./,
104
+ save_on_each_node=False,
105
+ save_steps=1000,
106
+ save_strategy=steps,
107
+ save_total_limit=None,
108
+ seed=42,
109
+ sharded_ddp=[],
110
+ skip_memory_metrics=True,
111
+ sortish_sampler=False,
112
+ tf32=None,
113
+ torch_compile=False,
114
+ torch_compile_backend=None,
115
+ torch_compile_mode=None,
116
+ torchdynamo=None,
117
+ tpu_metrics_debug=False,
118
+ tpu_num_cores=None,
119
+ use_ipex=False,
120
+ use_legacy_prediction_loop=False,
121
+ use_mps_device=False,
122
+ warmup_ratio=0.0,
123
+ warmup_steps=500,
124
+ weight_decay=0.0,
125
+ xpu_backend=None,
126
+ )
127
+ 12/18/2022 08:41:04 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(
128
+ _n_gpu=1,
129
+ adafactor=False,
130
+ adam_beta1=0.9,
131
+ adam_beta2=0.999,
132
+ adam_epsilon=1e-08,
133
+ auto_find_batch_size=False,
134
+ bf16=False,
135
+ bf16_full_eval=False,
136
+ data_seed=None,
137
+ dataloader_drop_last=False,
138
+ dataloader_num_workers=0,
139
+ dataloader_pin_memory=True,
140
+ ddp_bucket_cap_mb=None,
141
+ ddp_find_unused_parameters=None,
142
+ ddp_timeout=1800,
143
+ debug=[],
144
+ deepspeed=ds_config.json,
145
+ disable_tqdm=False,
146
+ do_eval=True,
147
+ do_predict=False,
148
+ do_train=True,
149
+ eval_accumulation_steps=None,
150
+ eval_delay=0,
151
+ eval_steps=1000,
152
+ evaluation_strategy=steps,
153
+ fp16=True,
154
+ fp16_backend=auto,
155
+ fp16_full_eval=False,
156
+ fp16_opt_level=O1,
157
+ fsdp=[],
158
+ fsdp_min_num_params=0,
159
+ fsdp_transformer_layer_cls_to_wrap=None,
160
+ full_determinism=False,
161
+ generation_max_length=225,
162
+ generation_num_beams=None,
163
+ gradient_accumulation_steps=1,
164
+ gradient_checkpointing=True,
165
+ greater_is_better=False,
166
+ group_by_length=False,
167
+ half_precision_backend=auto,
168
+ hub_model_id=None,
169
+ hub_private_repo=False,
170
+ hub_strategy=every_save,
171
+ hub_token=<HUB_TOKEN>,
172
+ ignore_data_skip=False,
173
+ include_inputs_for_metrics=False,
174
+ jit_mode_eval=False,
175
+ label_names=None,
176
+ label_smoothing_factor=0.0,
177
+ learning_rate=1e-05,
178
+ length_column_name=input_length,
179
+ load_best_model_at_end=True,
180
+ local_rank=0,
181
+ log_level=passive,
182
+ log_level_replica=passive,
183
+ log_on_each_node=True,
184
+ logging_dir=./runs/Dec18_08-41-04_fe2747a042f0,
185
+ logging_first_step=False,
186
+ logging_nan_inf_filter=True,
187
+ logging_steps=25,
188
+ logging_strategy=steps,
189
+ lr_scheduler_type=linear,
190
+ max_grad_norm=1.0,
191
+ max_steps=5000,
192
+ metric_for_best_model=wer,
193
+ mp_parameters=,
194
+ no_cuda=False,
195
+ num_train_epochs=3.0,
196
+ optim=adamw_hf,
197
+ optim_args=None,
198
+ output_dir=./,
199
+ overwrite_output_dir=True,
200
+ past_index=-1,
201
+ per_device_eval_batch_size=32,
202
+ per_device_train_batch_size=64,
203
+ predict_with_generate=True,
204
+ prediction_loss_only=False,
205
+ push_to_hub=True,
206
+ push_to_hub_model_id=None,
207
+ push_to_hub_organization=None,
208
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
209
+ ray_scope=last,
210
+ remove_unused_columns=True,
211
+ report_to=['tensorboard'],
212
+ resume_from_checkpoint=None,
213
+ run_name=./,
214
+ save_on_each_node=False,
215
+ save_steps=1000,
216
+ save_strategy=steps,
217
+ save_total_limit=None,
218
+ seed=42,
219
+ sharded_ddp=[],
220
+ skip_memory_metrics=True,
221
+ sortish_sampler=False,
222
+ tf32=None,
223
+ torch_compile=False,
224
+ torch_compile_backend=None,
225
+ torch_compile_mode=None,
226
+ torchdynamo=None,
227
+ tpu_metrics_debug=False,
228
+ tpu_num_cores=None,
229
+ use_ipex=False,
230
+ use_legacy_prediction_loop=False,
231
+ use_mps_device=False,
232
+ warmup_ratio=0.0,
233
+ warmup_steps=500,
234
+ weight_decay=0.0,
235
+ xpu_backend=None,
236
+ )
237
+ 12/18/2022 08:41:07 - INFO - datasets.info - Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
238
+ 12/18/2022 08:41:11 - INFO - datasets.info - Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
239
+ 12/18/2022 08:41:14 - INFO - datasets.info - Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
240
+ 12/18/2022 08:41:59 - WARNING - huggingface_hub.repository - /usr/src/app/models/whisper-small-ro-cv11/./ is already a clone of https://huggingface.co/mikr/whisper-small-ro-cv11. Make sure you pull the latest changes with `repo.git_pull()`.
241
+ [2022-12-18 08:42:04,348] [INFO] [logging.py:68:log_dist] [Rank 0] DeepSpeed info: version=0.8.0+a25c31b6, git-hash=a25c31b6, git-branch=master
242
+ [2022-12-18 08:42:04,669] [INFO] [logging.py:68:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False
243
+ Adam Optimizer #0 is created with AVX2 arithmetic capability.
244
+ Config: alpha=0.000010, betas=(0.900000, 0.999000), weight_decay=0.000000, adam_w=1
245
+ [2022-12-18 08:42:07,543] [INFO] [logging.py:68:log_dist] [Rank 0] Using DeepSpeed Optimizer param name adamw as basic optimizer
246
+ [2022-12-18 08:42:07,597] [INFO] [logging.py:68:log_dist] [Rank 0] DeepSpeed Basic Optimizer = DeepSpeedCPUAdam
247
+ [2022-12-18 08:42:07,597] [INFO] [utils.py:52:is_zero_supported_optimizer] Checking ZeRO support for optimizer=DeepSpeedCPUAdam type=<class 'deepspeed.ops.adam.cpu_adam.DeepSpeedCPUAdam'>
248
+ [2022-12-18 08:42:07,598] [INFO] [logging.py:68:log_dist] [Rank 0] Creating fp16 ZeRO stage 2 optimizer
249
+ [2022-12-18 08:42:07,598] [INFO] [stage_1_and_2.py:141:__init__] Reduce bucket size 200000000
250
+ [2022-12-18 08:42:07,598] [INFO] [stage_1_and_2.py:142:__init__] Allgather bucket size 200000000
251
+ [2022-12-18 08:42:07,598] [INFO] [stage_1_and_2.py:143:__init__] CPU Offload: True
252
+ [2022-12-18 08:42:07,598] [INFO] [stage_1_and_2.py:144:__init__] Round robin gradient partitioning: False
253
+ Rank: 0 partition count [1] and sizes[(241734912, False)]
254
+ [2022-12-18 08:42:08,957] [INFO] [utils.py:831:see_memory_usage] Before initializing optimizer states
255
+ [2022-12-18 08:42:08,958] [INFO] [utils.py:832:see_memory_usage] MA 0.53 GB Max_MA 0.53 GB CA 0.53 GB Max_CA 1 GB
256
+ [2022-12-18 08:42:08,958] [INFO] [utils.py:840:see_memory_usage] CPU Virtual Memory: used = 379.95 GB, percent = 75.4%
257
+ [2022-12-18 08:42:10,038] [INFO] [utils.py:831:see_memory_usage] After initializing optimizer states
258
+ [2022-12-18 08:42:10,039] [INFO] [utils.py:832:see_memory_usage] MA 0.53 GB Max_MA 0.53 GB CA 0.53 GB Max_CA 1 GB
259
+ [2022-12-18 08:42:10,039] [INFO] [utils.py:840:see_memory_usage] CPU Virtual Memory: used = 382.79 GB, percent = 76.0%
260
+ [2022-12-18 08:42:10,039] [INFO] [stage_1_and_2.py:527:__init__] optimizer state initialized
261
+ [2022-12-18 08:42:10,147] [INFO] [utils.py:831:see_memory_usage] After initializing ZeRO optimizer
262
+ [2022-12-18 08:42:10,148] [INFO] [utils.py:832:see_memory_usage] MA 0.53 GB Max_MA 0.53 GB CA 0.53 GB Max_CA 1 GB
263
+ [2022-12-18 08:42:10,148] [INFO] [utils.py:840:see_memory_usage] CPU Virtual Memory: used = 382.83 GB, percent = 76.0%
264
+ [2022-12-18 08:42:10,170] [INFO] [logging.py:68:log_dist] [Rank 0] DeepSpeed Final Optimizer = adamw
265
+ [2022-12-18 08:42:10,170] [INFO] [logging.py:68:log_dist] [Rank 0] DeepSpeed using configured LR scheduler = WarmupDecayLR
266
+ [2022-12-18 08:42:10,170] [INFO] [logging.py:68:log_dist] [Rank 0] DeepSpeed LR Scheduler = <deepspeed.runtime.lr_schedules.WarmupDecayLR object at 0x7ff2cab02f10>
267
+ [2022-12-18 08:42:10,170] [INFO] [logging.py:68:log_dist] [Rank 0] step=0, skipped=0, lr=[1e-05], mom=[[0.9, 0.999]]
268
+ [2022-12-18 08:42:10,172] [INFO] [config.py:1008:print] DeepSpeedEngine configuration:
269
+ [2022-12-18 08:42:10,172] [INFO] [config.py:1012:print] activation_checkpointing_config {
270
+ "partition_activations": false,
271
+ "contiguous_memory_optimization": false,
272
+ "cpu_checkpointing": false,
273
+ "number_checkpoints": null,
274
+ "synchronize_checkpoint_boundary": false,
275
+ "profile": false
276
+ }
277
+ [2022-12-18 08:42:10,172] [INFO] [config.py:1012:print] aio_config ................... {'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True}
278
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] amp_enabled .................. False
279
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] amp_params ................... False
280
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] autotuning_config ............ {
281
+ "enabled": false,
282
+ "start_step": null,
283
+ "end_step": null,
284
+ "metric_path": null,
285
+ "arg_mappings": null,
286
+ "metric": "throughput",
287
+ "model_info": null,
288
+ "results_dir": "autotuning_results",
289
+ "exps_dir": "autotuning_exps",
290
+ "overwrite": true,
291
+ "fast": true,
292
+ "start_profile_step": 3,
293
+ "end_profile_step": 5,
294
+ "tuner_type": "gridsearch",
295
+ "tuner_early_stopping": 5,
296
+ "tuner_num_trials": 50,
297
+ "model_info_path": null,
298
+ "mp_size": 1,
299
+ "max_train_batch_size": null,
300
+ "min_train_batch_size": 1,
301
+ "max_train_micro_batch_size_per_gpu": 1.024000e+03,
302
+ "min_train_micro_batch_size_per_gpu": 1,
303
+ "num_tuning_micro_batch_sizes": 3
304
+ }
305
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] bfloat16_enabled ............. False
306
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] checkpoint_parallel_write_pipeline False
307
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] checkpoint_tag_validation_enabled True
308
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] checkpoint_tag_validation_fail False
309
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] comms_config ................. <deepspeed.comm.config.DeepSpeedCommsConfig object at 0x7ff2cde355b0>
310
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] communication_data_type ...... None
311
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}}
312
+ [2022-12-18 08:42:10,173] [INFO] [config.py:1012:print] curriculum_enabled_legacy .... False
313
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] curriculum_params_legacy ..... False
314
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] data_efficiency_config ....... {'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}}
315
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] data_efficiency_enabled ...... False
316
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] dataloader_drop_last ......... False
317
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] disable_allgather ............ False
318
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] dump_state ................... False
319
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] dynamic_loss_scale_args ...... {'init_scale': 65536, 'scale_window': 1000, 'delayed_shift': 2, 'min_scale': 1}
320
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_enabled ........... False
321
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_gas_boundary_resolution 1
322
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_layer_name ........ bert.encoder.layer
323
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_layer_num ......... 0
324
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_max_iter .......... 100
325
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_stability ......... 1e-06
326
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_tol ............... 0.01
327
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] eigenvalue_verbose ........... False
328
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] elasticity_enabled ........... False
329
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] flops_profiler_config ........ {
330
+ "enabled": false,
331
+ "profile_step": 1,
332
+ "module_depth": -1,
333
+ "top_modules": 1,
334
+ "detailed": true,
335
+ "output_file": null
336
+ }
337
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] fp16_auto_cast ............... False
338
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] fp16_enabled ................. True
339
+ [2022-12-18 08:42:10,174] [INFO] [config.py:1012:print] fp16_master_weights_and_gradients False
340
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] global_rank .................. 0
341
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] grad_accum_dtype ............. None
342
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] gradient_accumulation_steps .. 1
343
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] gradient_clipping ............ 1.0
344
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] gradient_predivide_factor .... 1.0
345
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] initial_dynamic_scale ........ 65536
346
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] load_universal_checkpoint .... False
347
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] loss_scale ................... 0
348
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] memory_breakdown ............. False
349
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] monitor_config ............... <deepspeed.monitor.config.DeepSpeedMonitorConfig object at 0x7ff2cde359d0>
350
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] nebula_config ................ {
351
+ "enabled": false,
352
+ "persistent_storage_path": null,
353
+ "persistent_time_interval": 100,
354
+ "num_of_version_in_retention": 2,
355
+ "enable_nebula_load": true,
356
+ "load_path": null
357
+ }
358
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] optimizer_legacy_fusion ...... False
359
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] optimizer_name ............... adamw
360
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] optimizer_params ............. {'lr': 1e-05, 'betas': [0.9, 0.999], 'eps': 1e-08, 'weight_decay': 0.0}
361
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0}
362
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] pld_enabled .................. False
363
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] pld_params ................... False
364
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] prescale_gradients ........... False
365
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] scheduler_name ............... WarmupDecayLR
366
+ [2022-12-18 08:42:10,175] [INFO] [config.py:1012:print] scheduler_params ............. {'last_batch_iteration': -1, 'total_num_steps': 5000, 'warmup_min_lr': 0, 'warmup_max_lr': 1e-05, 'warmup_num_steps': 500}
367
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] sparse_attention ............. None
368
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] sparse_gradients_enabled ..... False
369
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] steps_per_print .............. 10
370
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] train_batch_size ............. 64
371
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] train_micro_batch_size_per_gpu 64
372
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] use_node_local_storage ....... False
373
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] wall_clock_breakdown ......... False
374
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] world_size ................... 1
375
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] zero_allow_untested_optimizer False
376
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] zero_config .................. stage=2 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=200000000 allgather_partitions=True allgather_bucket_size=200000000 overlap_comm=True load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=DeepSpeedZeroOffloadOptimizerConfig(device='cpu', nvme_path=None, buffer_count=4, pin_memory=True, pipeline=False, pipeline_read=False, pipeline_write=False, fast_init=False) sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False
377
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] zero_enabled ................. True
378
+ [2022-12-18 08:42:10,176] [INFO] [config.py:1012:print] zero_optimization_stage ...... 2
379
+ [2022-12-18 08:42:10,176] [INFO] [config.py:997:print_user_config] json = {
380
+ "fp16": {
381
+ "enabled": true,
382
+ "loss_scale": 0,
383
+ "loss_scale_window": 1000,
384
+ "initial_scale_power": 16,
385
+ "hysteresis": 2,
386
+ "min_loss_scale": 1
387
+ },
388
+ "optimizer": {
389
+ "type": "AdamW",
390
+ "params": {
391
+ "lr": 1e-05,
392
+ "betas": [0.9, 0.999],
393
+ "eps": 1e-08,
394
+ "weight_decay": 0.0
395
+ }
396
+ },
397
+ "scheduler": {
398
+ "type": "WarmupDecayLR",
399
+ "params": {
400
+ "last_batch_iteration": -1,
401
+ "total_num_steps": 5.000000e+03,
402
+ "warmup_min_lr": 0,
403
+ "warmup_max_lr": 1e-05,
404
+ "warmup_num_steps": 500
405
+ }
406
+ },
407
+ "zero_optimization": {
408
+ "stage": 2,
409
+ "offload_optimizer": {
410
+ "device": "cpu",
411
+ "pin_memory": true
412
+ },
413
+ "allgather_partitions": true,
414
+ "allgather_bucket_size": 2.000000e+08,
415
+ "overlap_comm": true,
416
+ "reduce_scatter": true,
417
+ "reduce_bucket_size": 2.000000e+08,
418
+ "contiguous_gradients": true
419
+ },
420
+ "gradient_accumulation_steps": 1,
421
+ "gradient_clipping": 1.0,
422
+ "train_batch_size": 64,
423
+ "train_micro_batch_size_per_gpu": 64
424
+ }
425
+ [2022-12-18 08:44:27,389] [INFO] [stage_1_and_2.py:1767:step] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 65536, reducing to 65536
426
+ [2022-12-18 08:44:43,482] [INFO] [stage_1_and_2.py:1767:step] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 65536, reducing to 32768.0
427
+ [2022-12-18 08:45:01,180] [INFO] [stage_1_and_2.py:1767:step] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 32768.0, reducing to 16384.0
428
+ [2022-12-18 08:45:17,756] [INFO] [stage_1_and_2.py:1767:step] [deepspeed] OVERFLOW! Rank 0 Skipping step. Attempted loss scale: 16384.0, reducing to 8192.0
429
+ [2022-12-18 08:47:03,107] [INFO] [logging.py:68:log_dist] [Rank 0] step=10, skipped=4, lr=[2.883141528559073e-06], mom=[[0.9, 0.999]]
430
+ [2022-12-18 08:47:03,108] [INFO] [timer.py:196:stop] epoch=0/micro_step=10/global_step=10, RunningAvgSamplesPerSec=17.81398816266197, CurrSamplesPerSec=17.536521056985904, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
431
+ [2022-12-18 08:49:58,788] [INFO] [logging.py:68:log_dist] [Rank 0] step=20, skipped=4, lr=[4.461405575910259e-06], mom=[[0.9, 0.999]]
432
+ [2022-12-18 08:49:58,790] [INFO] [timer.py:196:stop] epoch=0/micro_step=20/global_step=20, RunningAvgSamplesPerSec=17.707913433080698, CurrSamplesPerSec=17.67266334257317, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
433
+ {'loss': 0.817, 'learning_rate': 4.898977360288234e-06, 'epoch': 0.01}
434
+ [2022-12-18 08:52:59,924] [INFO] [logging.py:68:log_dist] [Rank 0] step=30, skipped=4, lr=[5.242641991936178e-06], mom=[[0.9, 0.999]]
435
+ [2022-12-18 08:52:59,926] [INFO] [timer.py:196:stop] epoch=0/micro_step=30/global_step=30, RunningAvgSamplesPerSec=17.624193801450975, CurrSamplesPerSec=17.42146956646065, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
436
+ [2022-12-18 08:56:04,927] [INFO] [logging.py:68:log_dist] [Rank 0] step=40, skipped=4, lr=[5.766283057118146e-06], mom=[[0.9, 0.999]]
437
+ [2022-12-18 08:56:04,928] [INFO] [timer.py:196:stop] epoch=0/micro_step=40/global_step=40, RunningAvgSamplesPerSec=17.611825644201268, CurrSamplesPerSec=17.199435261553543, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
438
+ [2022-12-18 08:59:02,160] [INFO] [logging.py:68:log_dist] [Rank 0] step=50, skipped=4, lr=[6.160712527409633e-06], mom=[[0.9, 0.999]]
439
+ [2022-12-18 08:59:02,163] [INFO] [timer.py:196:stop] epoch=0/micro_step=50/global_step=50, RunningAvgSamplesPerSec=17.57224681838786, CurrSamplesPerSec=17.471532518337153, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
440
+ {'loss': 0.3452, 'learning_rate': 6.160712527409633e-06, 'epoch': 0.01}
441
+ [2022-12-18 09:01:57,588] [INFO] [logging.py:68:log_dist] [Rank 0] step=60, skipped=4, lr=[6.4772414076394205e-06], mom=[[0.9, 0.999]]
442
+ [2022-12-18 09:01:57,590] [INFO] [timer.py:196:stop] epoch=0/micro_step=60/global_step=60, RunningAvgSamplesPerSec=17.556437081890596, CurrSamplesPerSec=17.58886056667178, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
443
+ [2022-12-18 09:04:51,502] [INFO] [logging.py:68:log_dist] [Rank 0] step=70, skipped=4, lr=[6.741623406776245e-06], mom=[[0.9, 0.999]]
444
+ [2022-12-18 09:04:51,503] [INFO] [timer.py:196:stop] epoch=0/micro_step=70/global_step=70, RunningAvgSamplesPerSec=17.550034157580424, CurrSamplesPerSec=17.404004920974277, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
445
+ {'loss': 0.3043, 'learning_rate': 6.85912902234906e-06, 'epoch': 0.01}
446
+ [2022-12-18 09:07:42,313] [INFO] [logging.py:68:log_dist] [Rank 0] step=80, skipped=4, lr=[6.968634661590082e-06], mom=[[0.9, 0.999]]
447
+ [2022-12-18 09:07:42,314] [INFO] [timer.py:196:stop] epoch=0/micro_step=80/global_step=80, RunningAvgSamplesPerSec=17.54554576845597, CurrSamplesPerSec=17.300649307375817, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
448
+ [2022-12-18 09:10:37,574] [INFO] [logging.py:68:log_dist] [Rank 0] step=90, skipped=4, lr=[7.1675433522258775e-06], mom=[[0.9, 0.999]]
449
+ [2022-12-18 09:10:37,575] [INFO] [timer.py:196:stop] epoch=0/micro_step=90/global_step=90, RunningAvgSamplesPerSec=17.54364083829183, CurrSamplesPerSec=17.543068524409126, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
450
+ [2022-12-18 09:13:33,056] [INFO] [logging.py:68:log_dist] [Rank 0] step=100, skipped=4, lr=[7.344547104469332e-06], mom=[[0.9, 0.999]]
451
+ [2022-12-18 09:13:33,058] [INFO] [timer.py:196:stop] epoch=0/micro_step=100/global_step=100, RunningAvgSamplesPerSec=17.544745558767882, CurrSamplesPerSec=17.311613610419286, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
452
+ {'loss': 0.2484, 'learning_rate': 7.344547104469332e-06, 'epoch': 0.02}
453
+ [2022-12-18 09:16:00,166] [INFO] [logging.py:68:log_dist] [Rank 0] step=110, skipped=4, lr=[7.503995457567235e-06], mom=[[0.9, 0.999]]
454
+ [2022-12-18 09:16:00,167] [INFO] [timer.py:196:stop] epoch=0/micro_step=110/global_step=110, RunningAvgSamplesPerSec=17.544294890083517, CurrSamplesPerSec=17.83244775214949, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
455
+ [2022-12-18 09:19:12,626] [INFO] [logging.py:68:log_dist] [Rank 0] step=120, skipped=4, lr=[7.649058662787184e-06], mom=[[0.9, 0.999]]
456
+ [2022-12-18 09:19:12,627] [INFO] [timer.py:196:stop] epoch=0/micro_step=120/global_step=120, RunningAvgSamplesPerSec=17.573285029996587, CurrSamplesPerSec=17.949146033725896, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
457
+ {'loss': 0.1897, 'learning_rate': 7.716963756434345e-06, 'epoch': 1.0}
458
+ [2022-12-18 09:22:01,411] [INFO] [logging.py:68:log_dist] [Rank 0] step=130, skipped=4, lr=[7.782118888847307e-06], mom=[[0.9, 0.999]]
459
+ [2022-12-18 09:22:01,412] [INFO] [timer.py:196:stop] epoch=0/micro_step=130/global_step=130, RunningAvgSamplesPerSec=17.55456949498131, CurrSamplesPerSec=17.13766063397266, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
460
+ [2022-12-18 09:24:51,224] [INFO] [logging.py:68:log_dist] [Rank 0] step=140, skipped=4, lr=[7.905011559752758e-06], mom=[[0.9, 0.999]]
461
+ [2022-12-18 09:24:51,225] [INFO] [timer.py:196:stop] epoch=0/micro_step=140/global_step=140, RunningAvgSamplesPerSec=17.55073845541743, CurrSamplesPerSec=17.72275019960219, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
462
+ [2022-12-18 09:27:38,350] [INFO] [logging.py:68:log_dist] [Rank 0] step=150, skipped=4, lr=[8.019180844200955e-06], mom=[[0.9, 0.999]]
463
+ [2022-12-18 09:27:38,352] [INFO] [timer.py:196:stop] epoch=0/micro_step=150/global_step=150, RunningAvgSamplesPerSec=17.54153626249399, CurrSamplesPerSec=17.28127515360052, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
464
+ {'loss': 0.1751, 'learning_rate': 8.019180844200955e-06, 'epoch': 1.01}
465
+ [2022-12-18 09:30:26,932] [INFO] [logging.py:68:log_dist] [Rank 0] step=160, skipped=4, lr=[8.125783520495252e-06], mom=[[0.9, 0.999]]
466
+ [2022-12-18 09:30:26,933] [INFO] [timer.py:196:stop] epoch=0/micro_step=160/global_step=160, RunningAvgSamplesPerSec=17.54237608474746, CurrSamplesPerSec=17.425431147803835, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
467
+ [2022-12-18 09:33:14,947] [INFO] [logging.py:68:log_dist] [Rank 0] step=170, skipped=4, lr=[8.225760510392298e-06], mom=[[0.9, 0.999]]
468
+ [2022-12-18 09:33:14,948] [INFO] [timer.py:196:stop] epoch=0/micro_step=170/global_step=170, RunningAvgSamplesPerSec=17.5394079030006, CurrSamplesPerSec=17.734454226542, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
469
+ {'loss': 0.1499, 'learning_rate': 8.27351214279797e-06, 'epoch': 1.01}
470
+ [2022-12-18 09:36:02,185] [INFO] [logging.py:68:log_dist] [Rank 0] step=180, skipped=4, lr=[8.31988745412743e-06], mom=[[0.9, 0.999]]
471
+ [2022-12-18 09:36:02,186] [INFO] [timer.py:196:stop] epoch=0/micro_step=180/global_step=180, RunningAvgSamplesPerSec=17.537096713384305, CurrSamplesPerSec=17.369822224845038, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
472
+ [2022-12-18 09:38:51,989] [INFO] [logging.py:68:log_dist] [Rank 0] step=190, skipped=4, lr=[8.408811289387583e-06], mom=[[0.9, 0.999]]
473
+ [2022-12-18 09:38:51,991] [INFO] [timer.py:196:stop] epoch=0/micro_step=190/global_step=190, RunningAvgSamplesPerSec=17.532972451853638, CurrSamplesPerSec=17.369981828636313, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
474
+ [2022-12-18 09:41:41,517] [INFO] [logging.py:68:log_dist] [Rank 0] step=200, skipped=4, lr=[8.49307723936858e-06], mom=[[0.9, 0.999]]
475
+ [2022-12-18 09:41:41,519] [INFO] [timer.py:196:stop] epoch=0/micro_step=200/global_step=200, RunningAvgSamplesPerSec=17.521639611761227, CurrSamplesPerSec=17.05773687963696, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
476
+ {'loss': 0.145, 'learning_rate': 8.49307723936858e-06, 'epoch': 1.02}
477
+ [2022-12-18 09:44:30,225] [INFO] [logging.py:68:log_dist] [Rank 0] step=210, skipped=4, lr=[8.573149077803088e-06], mom=[[0.9, 0.999]]
478
+ [2022-12-18 09:44:30,227] [INFO] [timer.py:196:stop] epoch=0/micro_step=210/global_step=210, RunningAvgSamplesPerSec=17.518351230897235, CurrSamplesPerSec=17.491189976865027, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
479
+ [2022-12-18 09:47:20,015] [INFO] [logging.py:68:log_dist] [Rank 0] step=220, skipped=4, lr=[8.64942458567722e-06], mom=[[0.9, 0.999]]
480
+ [2022-12-18 09:47:20,016] [INFO] [timer.py:196:stop] epoch=0/micro_step=220/global_step=220, RunningAvgSamplesPerSec=17.52044478044767, CurrSamplesPerSec=17.551188301729674, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
481
+ {'loss': 0.1039, 'learning_rate': 8.686247975778677e-06, 'epoch': 1.02}
482
+ [2022-12-18 09:48:47,089] [INFO] [logging.py:68:log_dist] [Rank 0] step=230, skipped=4, lr=[8.722247506883805e-06], mom=[[0.9, 0.999]]
483
+ [2022-12-18 09:48:47,090] [INFO] [timer.py:196:stop] epoch=0/micro_step=230/global_step=230, RunningAvgSamplesPerSec=17.52583657733623, CurrSamplesPerSec=17.795484760153986, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
484
+ [2022-12-18 09:52:53,020] [INFO] [logging.py:68:log_dist] [Rank 0] step=240, skipped=4, lr=[8.79191691333329e-06], mom=[[0.9, 0.999]]
485
+ [2022-12-18 09:52:53,022] [INFO] [timer.py:196:stop] epoch=0/micro_step=240/global_step=240, RunningAvgSamplesPerSec=17.540278044164523, CurrSamplesPerSec=17.340279812152833, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
486
+ [2022-12-18 09:55:42,935] [INFO] [logging.py:68:log_dist] [Rank 0] step=250, skipped=4, lr=[8.858694625217149e-06], mom=[[0.9, 0.999]]
487
+ [2022-12-18 09:55:42,936] [INFO] [timer.py:196:stop] epoch=0/micro_step=250/global_step=250, RunningAvgSamplesPerSec=17.528756039040783, CurrSamplesPerSec=17.753428194487324, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
488
+ {'loss': 0.0958, 'learning_rate': 8.858694625217149e-06, 'epoch': 2.0}
489
+ [2022-12-18 09:58:31,035] [INFO] [logging.py:68:log_dist] [Rank 0] step=260, skipped=4, lr=[8.922811151820517e-06], mom=[[0.9, 0.999]]
490
+ [2022-12-18 09:58:31,036] [INFO] [timer.py:196:stop] epoch=0/micro_step=260/global_step=260, RunningAvgSamplesPerSec=17.51445195778724, CurrSamplesPerSec=17.76546084196235, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
491
+ [2022-12-18 10:01:19,374] [INFO] [logging.py:68:log_dist] [Rank 0] step=270, skipped=4, lr=[8.984470493319244e-06], mom=[[0.9, 0.999]]
492
+ [2022-12-18 10:01:19,375] [INFO] [timer.py:196:stop] epoch=0/micro_step=270/global_step=270, RunningAvgSamplesPerSec=17.514860492931785, CurrSamplesPerSec=17.65513873928837, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
493
+ {'loss': 0.086, 'learning_rate': 9.014436199608479e-06, 'epoch': 2.01}
494
+ [2022-12-18 10:04:09,796] [INFO] [logging.py:68:log_dist] [Rank 0] step=280, skipped=4, lr=[9.043854055968706e-06], mom=[[0.9, 0.999]]
495
+ [2022-12-18 10:04:09,797] [INFO] [timer.py:196:stop] epoch=0/micro_step=280/global_step=280, RunningAvgSamplesPerSec=17.507487376679528, CurrSamplesPerSec=17.419261679194896, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
496
+ [2022-12-18 10:06:58,165] [INFO] [logging.py:68:log_dist] [Rank 0] step=290, skipped=4, lr=[9.10112387015335e-06], mom=[[0.9, 0.999]]
497
+ [2022-12-18 10:06:58,167] [INFO] [timer.py:196:stop] epoch=0/micro_step=290/global_step=290, RunningAvgSamplesPerSec=17.506782737249797, CurrSamplesPerSec=17.55021064009011, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
498
+ [2022-12-18 10:09:48,667] [INFO] [logging.py:68:log_dist] [Rank 0] step=300, skipped=4, lr=[9.156425255148058e-06], mom=[[0.9, 0.999]]
499
+ [2022-12-18 10:09:48,669] [INFO] [timer.py:196:stop] epoch=0/micro_step=300/global_step=300, RunningAvgSamplesPerSec=17.505229793083824, CurrSamplesPerSec=17.57551569533738, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
500
+ {'loss': 0.0686, 'learning_rate': 9.156425255148058e-06, 'epoch': 2.01}
501
+ [2022-12-18 10:12:41,600] [INFO] [logging.py:68:log_dist] [Rank 0] step=310, skipped=4, lr=[9.209889040960644e-06], mom=[[0.9, 0.999]]
502
+ [2022-12-18 10:12:41,601] [INFO] [timer.py:196:stop] epoch=0/micro_step=310/global_step=310, RunningAvgSamplesPerSec=17.506193858975763, CurrSamplesPerSec=17.60106809421937, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
503
+ [2022-12-18 10:15:34,448] [INFO] [logging.py:68:log_dist] [Rank 0] step=320, skipped=4, lr=[9.261633432763397e-06], mom=[[0.9, 0.999]]
504
+ [2022-12-18 10:15:34,449] [INFO] [timer.py:196:stop] epoch=0/micro_step=320/global_step=320, RunningAvgSamplesPerSec=17.502672734107193, CurrSamplesPerSec=17.71995293610675, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
505
+ {'loss': 0.0684, 'learning_rate': 9.28689473531776e-06, 'epoch': 2.02}
506
+ [2022-12-18 10:18:25,400] [INFO] [logging.py:68:log_dist] [Rank 0] step=330, skipped=4, lr=[9.311765584761373e-06], mom=[[0.9, 0.999]]
507
+ [2022-12-18 10:18:25,402] [INFO] [timer.py:196:stop] epoch=0/micro_step=330/global_step=330, RunningAvgSamplesPerSec=17.502310309234087, CurrSamplesPerSec=17.587128551283918, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
508
+ [2022-12-18 10:21:18,016] [INFO] [logging.py:68:log_dist] [Rank 0] step=340, skipped=4, lr=[9.360382936198493e-06], mom=[[0.9, 0.999]]
509
+ [2022-12-18 10:21:18,018] [INFO] [timer.py:196:stop] epoch=0/micro_step=340/global_step=340, RunningAvgSamplesPerSec=17.500643734444402, CurrSamplesPerSec=17.005072277531735, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
510
+ [2022-12-18 10:24:04,872] [INFO] [logging.py:68:log_dist] [Rank 0] step=350, skipped=4, lr=[9.407574351377137e-06], mom=[[0.9, 0.999]]
511
+ [2022-12-18 10:24:04,874] [INFO] [timer.py:196:stop] epoch=0/micro_step=350/global_step=350, RunningAvgSamplesPerSec=17.51464932561927, CurrSamplesPerSec=17.630059089676152, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
512
+ {'loss': 0.0482, 'learning_rate': 9.407574351377137e-06, 'epoch': 3.0}
513
+ [2022-12-18 10:26:57,641] [INFO] [logging.py:68:log_dist] [Rank 0] step=360, skipped=4, lr=[9.45342109721062e-06], mom=[[0.9, 0.999]]
514
+ [2022-12-18 10:26:57,643] [INFO] [timer.py:196:stop] epoch=0/micro_step=360/global_step=360, RunningAvgSamplesPerSec=17.505842284973642, CurrSamplesPerSec=17.025209457256768, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
515
+ [2022-12-18 10:29:51,036] [INFO] [logging.py:68:log_dist] [Rank 0] step=370, skipped=4, lr=[9.497997685324628e-06], mom=[[0.9, 0.999]]
516
+ [2022-12-18 10:29:51,038] [INFO] [timer.py:196:stop] epoch=0/micro_step=370/global_step=370, RunningAvgSamplesPerSec=17.505074655498845, CurrSamplesPerSec=17.17602944826478, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
517
+ {'loss': 0.0504, 'learning_rate': 9.519831289296397e-06, 'epoch': 3.01}
518
+ [2022-12-18 10:32:41,738] [INFO] [logging.py:68:log_dist] [Rank 0] step=380, skipped=4, lr=[9.541372600623587e-06], mom=[[0.9, 0.999]]
519
+ [2022-12-18 10:32:41,739] [INFO] [timer.py:196:stop] epoch=0/micro_step=380/global_step=380, RunningAvgSamplesPerSec=17.50077202769039, CurrSamplesPerSec=17.66240728131972, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
520
+ [2022-12-18 10:35:32,596] [INFO] [logging.py:68:log_dist] [Rank 0] step=390, skipped=4, lr=[9.583608934209288e-06], mom=[[0.9, 0.999]]
521
+ [2022-12-18 10:35:32,597] [INFO] [timer.py:196:stop] epoch=0/micro_step=390/global_step=390, RunningAvgSamplesPerSec=17.502615176175013, CurrSamplesPerSec=17.671288199732384, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
522
+ [2022-12-18 10:38:24,713] [INFO] [logging.py:68:log_dist] [Rank 0] step=400, skipped=4, lr=[9.624764935335318e-06], mom=[[0.9, 0.999]]
523
+ [2022-12-18 10:38:24,714] [INFO] [timer.py:196:stop] epoch=0/micro_step=400/global_step=400, RunningAvgSamplesPerSec=17.501141402908466, CurrSamplesPerSec=17.413306679702867, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
524
+ {'loss': 0.0446, 'learning_rate': 9.624764935335318e-06, 'epoch': 3.01}
525
+ [2022-12-18 10:41:14,089] [INFO] [logging.py:68:log_dist] [Rank 0] step=410, skipped=4, lr=[9.664894494516345e-06], mom=[[0.9, 0.999]]
526
+ [2022-12-18 10:41:14,090] [INFO] [timer.py:196:stop] epoch=0/micro_step=410/global_step=410, RunningAvgSamplesPerSec=17.50299955137452, CurrSamplesPerSec=16.856797996893206, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
527
+ [2022-12-18 10:44:03,121] [INFO] [logging.py:68:log_dist] [Rank 0] step=420, skipped=4, lr=[9.704047567846437e-06], mom=[[0.9, 0.999]]
528
+ [2022-12-18 10:44:03,123] [INFO] [timer.py:196:stop] epoch=0/micro_step=420/global_step=420, RunningAvgSamplesPerSec=17.49955598397826, CurrSamplesPerSec=17.39210969006482, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
529
+ {'loss': 0.0396, 'learning_rate': 9.723272550712454e-06, 'epoch': 3.02}
530
+ [2022-12-18 10:46:53,231] [INFO] [logging.py:68:log_dist] [Rank 0] step=430, skipped=4, lr=[9.742270550908135e-06], mom=[[0.9, 0.999]]
531
+ [2022-12-18 10:46:53,233] [INFO] [timer.py:196:stop] epoch=0/micro_step=430/global_step=430, RunningAvgSamplesPerSec=17.502169263783284, CurrSamplesPerSec=17.82420181661996, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
532
+ [2022-12-18 10:49:44,733] [INFO] [logging.py:68:log_dist] [Rank 0] step=440, skipped=4, lr=[9.779606609292176e-06], mom=[[0.9, 0.999]]
533
+ [2022-12-18 10:49:44,734] [INFO] [timer.py:196:stop] epoch=0/micro_step=440/global_step=440, RunningAvgSamplesPerSec=17.502468898132452, CurrSamplesPerSec=17.01432443514231, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
534
+ [2022-12-18 10:52:34,347] [INFO] [logging.py:68:log_dist] [Rank 0] step=450, skipped=4, lr=[9.816095971633122e-06], mom=[[0.9, 0.999]]
535
+ [2022-12-18 10:52:34,348] [INFO] [timer.py:196:stop] epoch=0/micro_step=450/global_step=450, RunningAvgSamplesPerSec=17.50661530301726, CurrSamplesPerSec=17.68532190940408, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
536
+ {'loss': 0.0309, 'learning_rate': 9.816095971633122e-06, 'epoch': 3.02}
537
+ [2022-12-18 10:54:31,089] [INFO] [logging.py:68:log_dist] [Rank 0] step=460, skipped=4, lr=[9.851776190149156e-06], mom=[[0.9, 0.999]]
538
+ [2022-12-18 10:54:31,091] [INFO] [timer.py:196:stop] epoch=0/micro_step=460/global_step=460, RunningAvgSamplesPerSec=17.505343508354425, CurrSamplesPerSec=17.622945563320123, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
539
+ [2022-12-18 10:58:09,980] [INFO] [logging.py:68:log_dist] [Rank 0] step=470, skipped=4, lr=[9.886682372916766e-06], mom=[[0.9, 0.999]]
540
+ [2022-12-18 10:58:09,982] [INFO] [timer.py:196:stop] epoch=0/micro_step=470/global_step=470, RunningAvgSamplesPerSec=17.51432072204074, CurrSamplesPerSec=17.560018580808034, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
541
+ {'loss': 0.0269, 'learning_rate': 9.90385555539545e-06, 'epoch': 4.0}
542
+ [2022-12-18 11:01:02,577] [INFO] [logging.py:68:log_dist] [Rank 0] step=480, skipped=4, lr=[9.92084739148192e-06], mom=[[0.9, 0.999]]
543
+ [2022-12-18 11:01:02,579] [INFO] [timer.py:196:stop] epoch=0/micro_step=480/global_step=480, RunningAvgSamplesPerSec=17.51471041450659, CurrSamplesPerSec=17.770770916559375, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
544
+ [2022-12-18 11:03:53,720] [INFO] [logging.py:68:log_dist] [Rank 0] step=490, skipped=4, lr=[9.954302066885107e-06], mom=[[0.9, 0.999]]
545
+ [2022-12-18 11:03:53,722] [INFO] [timer.py:196:stop] epoch=0/micro_step=490/global_step=490, RunningAvgSamplesPerSec=17.514707806076828, CurrSamplesPerSec=17.474965145825077, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
546
+ [2022-12-18 11:06:40,711] [INFO] [logging.py:68:log_dist] [Rank 0] step=500, skipped=4, lr=[9.987075336738768e-06], mom=[[0.9, 0.999]]
547
+ [2022-12-18 11:06:40,713] [INFO] [timer.py:196:stop] epoch=0/micro_step=500/global_step=500, RunningAvgSamplesPerSec=17.51673358493395, CurrSamplesPerSec=17.728490220205128, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
548
+ {'loss': 0.03, 'learning_rate': 9.987075336738768e-06, 'epoch': 4.01}
549
+ [2022-12-18 11:09:32,253] [INFO] [logging.py:68:log_dist] [Rank 0] step=510, skipped=4, lr=[9.98888888888889e-06], mom=[[0.9, 0.999]]
550
+ [2022-12-18 11:09:32,254] [INFO] [timer.py:196:stop] epoch=0/micro_step=510/global_step=510, RunningAvgSamplesPerSec=17.515181954535795, CurrSamplesPerSec=17.44658815742413, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
551
+ [2022-12-18 11:12:24,243] [INFO] [logging.py:68:log_dist] [Rank 0] step=520, skipped=4, lr=[9.966666666666667e-06], mom=[[0.9, 0.999]]
552
+ [2022-12-18 11:12:24,244] [INFO] [timer.py:196:stop] epoch=0/micro_step=520/global_step=520, RunningAvgSamplesPerSec=17.516449396915945, CurrSamplesPerSec=17.71918211824117, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
553
+ {'loss': 0.025, 'learning_rate': 9.955555555555556e-06, 'epoch': 4.01}
554
+ [2022-12-18 11:15:10,174] [INFO] [logging.py:68:log_dist] [Rank 0] step=530, skipped=4, lr=[9.944444444444445e-06], mom=[[0.9, 0.999]]
555
+ [2022-12-18 11:15:10,175] [INFO] [timer.py:196:stop] epoch=0/micro_step=530/global_step=530, RunningAvgSamplesPerSec=17.515811116445363, CurrSamplesPerSec=17.44589309419759, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
556
+ [2022-12-18 11:17:59,140] [INFO] [logging.py:68:log_dist] [Rank 0] step=540, skipped=4, lr=[9.922222222222222e-06], mom=[[0.9, 0.999]]
557
+ [2022-12-18 11:17:59,141] [INFO] [timer.py:196:stop] epoch=0/micro_step=540/global_step=540, RunningAvgSamplesPerSec=17.51307013237252, CurrSamplesPerSec=17.709312456169634, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
558
+ [2022-12-18 11:20:51,468] [INFO] [logging.py:68:log_dist] [Rank 0] step=550, skipped=4, lr=[9.9e-06], mom=[[0.9, 0.999]]
559
+ [2022-12-18 11:20:51,469] [INFO] [timer.py:196:stop] epoch=0/micro_step=550/global_step=550, RunningAvgSamplesPerSec=17.51360962534701, CurrSamplesPerSec=17.514523814355172, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
560
+ {'loss': 0.0214, 'learning_rate': 9.9e-06, 'epoch': 4.02}
561
+ [2022-12-18 11:23:41,540] [INFO] [logging.py:68:log_dist] [Rank 0] step=560, skipped=4, lr=[9.877777777777778e-06], mom=[[0.9, 0.999]]
562
+ [2022-12-18 11:23:41,542] [INFO] [timer.py:196:stop] epoch=0/micro_step=560/global_step=560, RunningAvgSamplesPerSec=17.515173467375842, CurrSamplesPerSec=17.700679243083353, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
563
+ [2022-12-18 11:26:28,412] [INFO] [logging.py:68:log_dist] [Rank 0] step=570, skipped=4, lr=[9.855555555555555e-06], mom=[[0.9, 0.999]]
564
+ [2022-12-18 11:26:28,413] [INFO] [timer.py:196:stop] epoch=0/micro_step=570/global_step=570, RunningAvgSamplesPerSec=17.515792660700566, CurrSamplesPerSec=17.636638364608388, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
565
+ {'loss': 0.0176, 'learning_rate': 9.844444444444446e-06, 'epoch': 4.02}
566
+ [2022-12-18 11:27:30,542] [INFO] [logging.py:68:log_dist] [Rank 0] step=580, skipped=4, lr=[9.833333333333333e-06], mom=[[0.9, 0.999]]
567
+ [2022-12-18 11:27:30,544] [INFO] [timer.py:196:stop] epoch=0/micro_step=580/global_step=580, RunningAvgSamplesPerSec=17.525312485669467, CurrSamplesPerSec=22.15033486404057, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
568
+ [2022-12-18 11:31:57,731] [INFO] [logging.py:68:log_dist] [Rank 0] step=590, skipped=4, lr=[9.811111111111112e-06], mom=[[0.9, 0.999]]
569
+ [2022-12-18 11:31:57,733] [INFO] [timer.py:196:stop] epoch=0/micro_step=590/global_step=590, RunningAvgSamplesPerSec=17.52578604906633, CurrSamplesPerSec=17.542104377157184, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
570
+ [2022-12-18 11:34:45,506] [INFO] [logging.py:68:log_dist] [Rank 0] step=600, skipped=4, lr=[9.78888888888889e-06], mom=[[0.9, 0.999]]
571
+ [2022-12-18 11:34:45,507] [INFO] [timer.py:196:stop] epoch=0/micro_step=600/global_step=600, RunningAvgSamplesPerSec=17.525721346081685, CurrSamplesPerSec=17.728407089792164, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
572
+ {'loss': 0.0186, 'learning_rate': 9.78888888888889e-06, 'epoch': 5.0}
573
+ [2022-12-18 11:37:32,907] [INFO] [logging.py:68:log_dist] [Rank 0] step=610, skipped=4, lr=[9.766666666666667e-06], mom=[[0.9, 0.999]]
574
+ [2022-12-18 11:37:32,909] [INFO] [timer.py:196:stop] epoch=0/micro_step=610/global_step=610, RunningAvgSamplesPerSec=17.52620539215051, CurrSamplesPerSec=17.665249180375554, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
575
+ [2022-12-18 11:40:23,836] [INFO] [logging.py:68:log_dist] [Rank 0] step=620, skipped=4, lr=[9.744444444444445e-06], mom=[[0.9, 0.999]]
576
+ [2022-12-18 11:40:23,837] [INFO] [timer.py:196:stop] epoch=0/micro_step=620/global_step=620, RunningAvgSamplesPerSec=17.524553828947862, CurrSamplesPerSec=17.647862254407272, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
577
+ {'loss': 0.0179, 'learning_rate': 9.733333333333334e-06, 'epoch': 5.01}
578
+ [2022-12-18 11:43:10,955] [INFO] [logging.py:68:log_dist] [Rank 0] step=630, skipped=4, lr=[9.722222222222223e-06], mom=[[0.9, 0.999]]
579
+ [2022-12-18 11:43:10,957] [INFO] [timer.py:196:stop] epoch=0/micro_step=630/global_step=630, RunningAvgSamplesPerSec=17.524283076579064, CurrSamplesPerSec=17.71436339529198, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
580
+ [2022-12-18 11:45:59,599] [INFO] [logging.py:68:log_dist] [Rank 0] step=640, skipped=4, lr=[9.7e-06], mom=[[0.9, 0.999]]
581
+ [2022-12-18 11:45:59,600] [INFO] [timer.py:196:stop] epoch=0/micro_step=640/global_step=640, RunningAvgSamplesPerSec=17.5239493520232, CurrSamplesPerSec=17.29191637538188, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
582
+ [2022-12-18 11:48:51,158] [INFO] [logging.py:68:log_dist] [Rank 0] step=650, skipped=4, lr=[9.677777777777778e-06], mom=[[0.9, 0.999]]
583
+ [2022-12-18 11:48:51,159] [INFO] [timer.py:196:stop] epoch=0/micro_step=650/global_step=650, RunningAvgSamplesPerSec=17.520107118857844, CurrSamplesPerSec=17.428385128642994, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
584
+ {'loss': 0.0141, 'learning_rate': 9.677777777777778e-06, 'epoch': 5.01}
585
+ [2022-12-18 11:51:44,941] [INFO] [logging.py:68:log_dist] [Rank 0] step=660, skipped=4, lr=[9.655555555555556e-06], mom=[[0.9, 0.999]]
586
+ [2022-12-18 11:51:44,942] [INFO] [timer.py:196:stop] epoch=0/micro_step=660/global_step=660, RunningAvgSamplesPerSec=17.519444727080156, CurrSamplesPerSec=16.994613216593315, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
587
+ [2022-12-18 11:54:37,926] [INFO] [logging.py:68:log_dist] [Rank 0] step=670, skipped=4, lr=[9.633333333333335e-06], mom=[[0.9, 0.999]]
588
+ [2022-12-18 11:54:37,928] [INFO] [timer.py:196:stop] epoch=0/micro_step=670/global_step=670, RunningAvgSamplesPerSec=17.519984376324164, CurrSamplesPerSec=17.66502830473203, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
589
+ {'loss': 0.0131, 'learning_rate': 9.622222222222222e-06, 'epoch': 5.02}
590
+ [2022-12-18 11:57:26,593] [INFO] [logging.py:68:log_dist] [Rank 0] step=680, skipped=4, lr=[9.611111111111112e-06], mom=[[0.9, 0.999]]
591
+ [2022-12-18 11:57:26,595] [INFO] [timer.py:196:stop] epoch=0/micro_step=680/global_step=680, RunningAvgSamplesPerSec=17.51917469369641, CurrSamplesPerSec=17.81082610549716, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
592
+ [2022-12-18 11:59:46,615] [INFO] [logging.py:68:log_dist] [Rank 0] step=690, skipped=4, lr=[9.58888888888889e-06], mom=[[0.9, 0.999]]
593
+ [2022-12-18 11:59:46,616] [INFO] [timer.py:196:stop] epoch=0/micro_step=690/global_step=690, RunningAvgSamplesPerSec=17.520953040279323, CurrSamplesPerSec=17.893703708073588, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
594
+ [2022-12-18 12:03:02,532] [INFO] [logging.py:68:log_dist] [Rank 0] step=700, skipped=4, lr=[9.566666666666668e-06], mom=[[0.9, 0.999]]
595
+ [2022-12-18 12:03:02,534] [INFO] [timer.py:196:stop] epoch=0/micro_step=700/global_step=700, RunningAvgSamplesPerSec=17.526881403928282, CurrSamplesPerSec=17.811006916180688, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
596
+ {'loss': 0.0111, 'learning_rate': 9.566666666666668e-06, 'epoch': 6.0}
597
+ [2022-12-18 12:05:50,396] [INFO] [logging.py:68:log_dist] [Rank 0] step=710, skipped=4, lr=[9.544444444444445e-06], mom=[[0.9, 0.999]]
598
+ [2022-12-18 12:05:50,397] [INFO] [timer.py:196:stop] epoch=0/micro_step=710/global_step=710, RunningAvgSamplesPerSec=17.525663669476064, CurrSamplesPerSec=17.627963556175473, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
599
+ [2022-12-18 12:08:40,501] [INFO] [logging.py:68:log_dist] [Rank 0] step=720, skipped=4, lr=[9.522222222222223e-06], mom=[[0.9, 0.999]]
600
+ [2022-12-18 12:08:40,503] [INFO] [timer.py:196:stop] epoch=0/micro_step=720/global_step=720, RunningAvgSamplesPerSec=17.522315207058163, CurrSamplesPerSec=17.08808021934021, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
601
+ {'loss': 0.0115, 'learning_rate': 9.511111111111112e-06, 'epoch': 6.01}
602
+ [2022-12-18 12:11:29,479] [INFO] [logging.py:68:log_dist] [Rank 0] step=730, skipped=4, lr=[9.5e-06], mom=[[0.9, 0.999]]
603
+ [2022-12-18 12:11:29,481] [INFO] [timer.py:196:stop] epoch=0/micro_step=730/global_step=730, RunningAvgSamplesPerSec=17.522397647878112, CurrSamplesPerSec=17.69114963575808, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
604
+ [2022-12-18 12:14:18,585] [INFO] [logging.py:68:log_dist] [Rank 0] step=740, skipped=4, lr=[9.47777777777778e-06], mom=[[0.9, 0.999]]
605
+ [2022-12-18 12:14:18,587] [INFO] [timer.py:196:stop] epoch=0/micro_step=740/global_step=740, RunningAvgSamplesPerSec=17.522899649689435, CurrSamplesPerSec=17.835842357826117, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
606
+ [2022-12-18 12:17:07,842] [INFO] [logging.py:68:log_dist] [Rank 0] step=750, skipped=4, lr=[9.455555555555557e-06], mom=[[0.9, 0.999]]
607
+ [2022-12-18 12:17:07,843] [INFO] [timer.py:196:stop] epoch=0/micro_step=750/global_step=750, RunningAvgSamplesPerSec=17.52216964269384, CurrSamplesPerSec=17.715532463583024, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
608
+ {'loss': 0.0097, 'learning_rate': 9.455555555555557e-06, 'epoch': 6.01}
609
+ [2022-12-18 12:19:56,259] [INFO] [logging.py:68:log_dist] [Rank 0] step=760, skipped=4, lr=[9.433333333333335e-06], mom=[[0.9, 0.999]]
610
+ [2022-12-18 12:19:56,260] [INFO] [timer.py:196:stop] epoch=0/micro_step=760/global_step=760, RunningAvgSamplesPerSec=17.52350085830269, CurrSamplesPerSec=17.825603229647047, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
611
+ [2022-12-18 12:22:43,814] [INFO] [logging.py:68:log_dist] [Rank 0] step=770, skipped=4, lr=[9.411111111111113e-06], mom=[[0.9, 0.999]]
612
+ [2022-12-18 12:22:43,816] [INFO] [timer.py:196:stop] epoch=0/micro_step=770/global_step=770, RunningAvgSamplesPerSec=17.523872671951036, CurrSamplesPerSec=17.464620175491046, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
613
+ {'loss': 0.0091, 'learning_rate': 9.4e-06, 'epoch': 6.02}
614
+ [2022-12-18 12:25:34,399] [INFO] [logging.py:68:log_dist] [Rank 0] step=780, skipped=4, lr=[9.38888888888889e-06], mom=[[0.9, 0.999]]
615
+ [2022-12-18 12:25:34,401] [INFO] [timer.py:196:stop] epoch=0/micro_step=780/global_step=780, RunningAvgSamplesPerSec=17.523888473835193, CurrSamplesPerSec=17.62105645548852, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
616
+ [2022-12-18 12:28:22,069] [INFO] [logging.py:68:log_dist] [Rank 0] step=790, skipped=4, lr=[9.366666666666668e-06], mom=[[0.9, 0.999]]
617
+ [2022-12-18 12:28:22,070] [INFO] [timer.py:196:stop] epoch=0/micro_step=790/global_step=790, RunningAvgSamplesPerSec=17.52396646475176, CurrSamplesPerSec=17.7942001353745, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
618
+ [2022-12-18 12:31:10,611] [INFO] [logging.py:68:log_dist] [Rank 0] step=800, skipped=4, lr=[9.344444444444446e-06], mom=[[0.9, 0.999]]
619
+ [2022-12-18 12:31:10,613] [INFO] [timer.py:196:stop] epoch=0/micro_step=800/global_step=800, RunningAvgSamplesPerSec=17.52362785335887, CurrSamplesPerSec=17.645739287742696, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
620
+ {'loss': 0.0099, 'learning_rate': 9.344444444444446e-06, 'epoch': 6.02}
621
+ [2022-12-18 12:32:39,816] [INFO] [logging.py:68:log_dist] [Rank 0] step=810, skipped=4, lr=[9.322222222222223e-06], mom=[[0.9, 0.999]]
622
+ [2022-12-18 12:32:39,818] [INFO] [timer.py:196:stop] epoch=0/micro_step=810/global_step=810, RunningAvgSamplesPerSec=17.525217387924933, CurrSamplesPerSec=17.6307387978646, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
623
+ [2022-12-18 12:36:44,201] [INFO] [logging.py:68:log_dist] [Rank 0] step=820, skipped=4, lr=[9.3e-06], mom=[[0.9, 0.999]]
624
+ [2022-12-18 12:36:44,203] [INFO] [timer.py:196:stop] epoch=0/micro_step=820/global_step=820, RunningAvgSamplesPerSec=17.527549173776915, CurrSamplesPerSec=17.24311195262599, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
625
+ {'loss': 0.0073, 'learning_rate': 9.28888888888889e-06, 'epoch': 7.0}
626
+ [2022-12-18 12:39:35,915] [INFO] [logging.py:68:log_dist] [Rank 0] step=830, skipped=4, lr=[9.277777777777778e-06], mom=[[0.9, 0.999]]
627
+ [2022-12-18 12:39:35,917] [INFO] [timer.py:196:stop] epoch=0/micro_step=830/global_step=830, RunningAvgSamplesPerSec=17.527139118776905, CurrSamplesPerSec=17.824774664911605, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
628
+ [2022-12-18 12:42:25,214] [INFO] [logging.py:68:log_dist] [Rank 0] step=840, skipped=4, lr=[9.255555555555556e-06], mom=[[0.9, 0.999]]
629
+ [2022-12-18 12:42:25,215] [INFO] [timer.py:196:stop] epoch=0/micro_step=840/global_step=840, RunningAvgSamplesPerSec=17.529876938383726, CurrSamplesPerSec=17.677469879502205, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
630
+ [2022-12-18 12:45:16,974] [INFO] [logging.py:68:log_dist] [Rank 0] step=850, skipped=4, lr=[9.233333333333334e-06], mom=[[0.9, 0.999]]
631
+ [2022-12-18 12:45:16,976] [INFO] [timer.py:196:stop] epoch=0/micro_step=850/global_step=850, RunningAvgSamplesPerSec=17.533049828024673, CurrSamplesPerSec=17.934881195101724, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
632
+ {'loss': 0.0062, 'learning_rate': 9.233333333333334e-06, 'epoch': 7.01}
633
+ [2022-12-18 12:48:07,233] [INFO] [logging.py:68:log_dist] [Rank 0] step=860, skipped=4, lr=[9.211111111111111e-06], mom=[[0.9, 0.999]]
634
+ [2022-12-18 12:48:07,234] [INFO] [timer.py:196:stop] epoch=0/micro_step=860/global_step=860, RunningAvgSamplesPerSec=17.536686576379214, CurrSamplesPerSec=17.72774792834313, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
635
+ [2022-12-18 12:50:56,254] [INFO] [logging.py:68:log_dist] [Rank 0] step=870, skipped=4, lr=[9.188888888888889e-06], mom=[[0.9, 0.999]]
636
+ [2022-12-18 12:50:56,255] [INFO] [timer.py:196:stop] epoch=0/micro_step=870/global_step=870, RunningAvgSamplesPerSec=17.53939897716571, CurrSamplesPerSec=17.863981354170473, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
637
+ {'loss': 0.006, 'learning_rate': 9.17777777777778e-06, 'epoch': 7.01}
638
+ [2022-12-18 12:53:49,512] [INFO] [logging.py:68:log_dist] [Rank 0] step=880, skipped=4, lr=[9.166666666666666e-06], mom=[[0.9, 0.999]]
639
+ [2022-12-18 12:53:49,514] [INFO] [timer.py:196:stop] epoch=0/micro_step=880/global_step=880, RunningAvgSamplesPerSec=17.54251129449292, CurrSamplesPerSec=17.672755259073455, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
640
+ [2022-12-18 12:56:39,481] [INFO] [logging.py:68:log_dist] [Rank 0] step=890, skipped=4, lr=[9.144444444444444e-06], mom=[[0.9, 0.999]]
641
+ [2022-12-18 12:56:39,482] [INFO] [timer.py:196:stop] epoch=0/micro_step=890/global_step=890, RunningAvgSamplesPerSec=17.545431248601155, CurrSamplesPerSec=17.820363263008623, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
642
+ [2022-12-18 12:59:28,307] [INFO] [logging.py:68:log_dist] [Rank 0] step=900, skipped=4, lr=[9.122222222222223e-06], mom=[[0.9, 0.999]]
643
+ [2022-12-18 12:59:28,309] [INFO] [timer.py:196:stop] epoch=0/micro_step=900/global_step=900, RunningAvgSamplesPerSec=17.5474477902634, CurrSamplesPerSec=17.84197843798558, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
644
+ {'loss': 0.0046, 'learning_rate': 9.122222222222223e-06, 'epoch': 7.02}
645
+ [2022-12-18 13:02:23,366] [INFO] [logging.py:68:log_dist] [Rank 0] step=910, skipped=4, lr=[9.100000000000001e-06], mom=[[0.9, 0.999]]
646
+ [2022-12-18 13:02:23,368] [INFO] [timer.py:196:stop] epoch=0/micro_step=910/global_step=910, RunningAvgSamplesPerSec=17.54704328541452, CurrSamplesPerSec=17.599693685140846, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
647
+ [2022-12-18 13:05:10,345] [INFO] [logging.py:68:log_dist] [Rank 0] step=920, skipped=4, lr=[9.077777777777779e-06], mom=[[0.9, 0.999]]
648
+ [2022-12-18 13:05:10,346] [INFO] [timer.py:196:stop] epoch=0/micro_step=920/global_step=920, RunningAvgSamplesPerSec=17.548757345088557, CurrSamplesPerSec=17.80023677023281, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
649
+ {'loss': 0.0045, 'learning_rate': 9.066666666666667e-06, 'epoch': 7.02}
650
+ [2022-12-18 13:07:52,456] [INFO] [logging.py:68:log_dist] [Rank 0] step=930, skipped=4, lr=[9.055555555555556e-06], mom=[[0.9, 0.999]]
651
+ [2022-12-18 13:07:52,457] [INFO] [timer.py:196:stop] epoch=0/micro_step=930/global_step=930, RunningAvgSamplesPerSec=17.555835577865636, CurrSamplesPerSec=16.947261645741815, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
652
+ [2022-12-18 13:10:39,445] [INFO] [logging.py:68:log_dist] [Rank 0] step=940, skipped=4, lr=[9.033333333333334e-06], mom=[[0.9, 0.999]]
653
+ [2022-12-18 13:10:39,447] [INFO] [timer.py:196:stop] epoch=0/micro_step=940/global_step=940, RunningAvgSamplesPerSec=17.558626670023333, CurrSamplesPerSec=17.837192265183017, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
654
+ [2022-12-18 13:13:28,825] [INFO] [logging.py:68:log_dist] [Rank 0] step=950, skipped=4, lr=[9.011111111111111e-06], mom=[[0.9, 0.999]]
655
+ [2022-12-18 13:13:28,827] [INFO] [timer.py:196:stop] epoch=0/micro_step=950/global_step=950, RunningAvgSamplesPerSec=17.561116636037408, CurrSamplesPerSec=17.875913197667035, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
656
+ {'loss': 0.0053, 'learning_rate': 9.011111111111111e-06, 'epoch': 8.0}
657
+ [2022-12-18 13:16:15,728] [INFO] [logging.py:68:log_dist] [Rank 0] step=960, skipped=4, lr=[8.988888888888889e-06], mom=[[0.9, 0.999]]
658
+ [2022-12-18 13:16:15,729] [INFO] [timer.py:196:stop] epoch=0/micro_step=960/global_step=960, RunningAvgSamplesPerSec=17.56308806750036, CurrSamplesPerSec=17.781870571307575, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
659
+ [2022-12-18 13:19:02,146] [INFO] [logging.py:68:log_dist] [Rank 0] step=970, skipped=4, lr=[8.966666666666667e-06], mom=[[0.9, 0.999]]
660
+ [2022-12-18 13:19:02,147] [INFO] [timer.py:196:stop] epoch=0/micro_step=970/global_step=970, RunningAvgSamplesPerSec=17.564578542499007, CurrSamplesPerSec=17.74904968508855, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
661
+ {'loss': 0.0039, 'learning_rate': 8.955555555555555e-06, 'epoch': 8.01}
662
+ [2022-12-18 13:21:50,887] [INFO] [logging.py:68:log_dist] [Rank 0] step=980, skipped=4, lr=[8.944444444444446e-06], mom=[[0.9, 0.999]]
663
+ [2022-12-18 13:21:50,889] [INFO] [timer.py:196:stop] epoch=0/micro_step=980/global_step=980, RunningAvgSamplesPerSec=17.566081411767524, CurrSamplesPerSec=18.11684822516133, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
664
+ [2022-12-18 13:24:37,286] [INFO] [logging.py:68:log_dist] [Rank 0] step=990, skipped=4, lr=[8.922222222222224e-06], mom=[[0.9, 0.999]]
665
+ [2022-12-18 13:24:37,287] [INFO] [timer.py:196:stop] epoch=0/micro_step=990/global_step=990, RunningAvgSamplesPerSec=17.56737818663697, CurrSamplesPerSec=17.898017842980305, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
666
+ [2022-12-18 13:27:24,217] [INFO] [logging.py:68:log_dist] [Rank 0] step=1000, skipped=4, lr=[8.900000000000001e-06], mom=[[0.9, 0.999]]
667
+ [2022-12-18 13:27:24,219] [INFO] [timer.py:196:stop] epoch=0/micro_step=1000/global_step=1000, RunningAvgSamplesPerSec=17.569257091709765, CurrSamplesPerSec=17.374920879792757, MemAllocated=0.53GB, MaxMemAllocated=17.47GB
668
+ {'loss': 0.0046, 'learning_rate': 8.900000000000001e-06, 'epoch': 8.01}
669
+ {'eval_loss': 0.28076171875, 'eval_wer': 17.571297148114077, 'eval_runtime': 1237.4696, 'eval_samples_per_second': 3.118, 'eval_steps_per_second': 0.098, 'epoch': 8.01}
670
+ [2022-12-18 13:48:02,674] [INFO] [logging.py:68:log_dist] [Rank 0] [Torch] Checkpoint global_step1000 is begin to save!
671
+ [2022-12-18 13:48:02,684] [INFO] [logging.py:68:log_dist] [Rank 0] Saving model checkpoint: ./checkpoint-1000/global_step1000/mp_rank_00_model_states.pt
672
+ [2022-12-18 13:48:02,684] [INFO] [torch_checkpoint_engine.py:15:save] [Torch] Saving ./checkpoint-1000/global_step1000/mp_rank_00_model_states.pt...
673
+ [2022-12-18 13:48:03,680] [INFO] [torch_checkpoint_engine.py:17:save] [Torch] Saved ./checkpoint-1000/global_step1000/mp_rank_00_model_states.pt.
674
+ [2022-12-18 13:48:03,682] [INFO] [torch_checkpoint_engine.py:15:save] [Torch] Saving ./checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_optim_states.pt...
675
+ [2022-12-18 13:48:08,206] [INFO] [torch_checkpoint_engine.py:17:save] [Torch] Saved ./checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_optim_states.pt.
676
+ [2022-12-18 13:48:08,208] [INFO] [engine.py:3394:_save_zero_checkpoint] zero checkpoint saved ./checkpoint-1000/global_step1000/zero_pp_rank_0_mp_rank_00_optim_states.pt
677
+ [2022-12-18 13:48:08,208] [INFO] [torch_checkpoint_engine.py:27:commit] [Torch] Checkpoint global_step1000 is ready now!
run.sh ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ deepspeed run_speech_recognition_seq2seq_streaming.py \
2
+ --deepspeed="ds_config.json" \
3
+ --model_name_or_path="openai/whisper-small" \
4
+ --dataset_name="mozilla-foundation/common_voice_11_0" \
5
+ --dataset_config_name="ro" \
6
+ --language="romanian" \
7
+ --train_split_name="train+validation" \
8
+ --eval_split_name="test" \
9
+ --model_index_name="Whisper Small Romanian CV11" \
10
+ --max_steps="5000" \
11
+ --output_dir="./" \
12
+ --per_device_train_batch_size="64" \
13
+ --per_device_eval_batch_size="32" \
14
+ --logging_steps="25" \
15
+ --learning_rate="1e-5" \
16
+ --warmup_steps="500" \
17
+ --evaluation_strategy="steps" \
18
+ --eval_steps="1000" \
19
+ --save_strategy="steps" \
20
+ --save_steps="1000" \
21
+ --generation_max_length="225" \
22
+ --length_column_name="input_length" \
23
+ --max_duration_in_seconds="30" \
24
+ --text_column_name="sentence" \
25
+ --freeze_feature_encoder="False" \
26
+ --report_to="tensorboard" \
27
+ --metric_for_best_model="wer" \
28
+ --greater_is_better="False" \
29
+ --load_best_model_at_end \
30
+ --gradient_checkpointing \
31
+ --fp16 \
32
+ --overwrite_output_dir \
33
+ --do_train \
34
+ --do_eval \
35
+ --predict_with_generate \
36
+ --do_normalize_eval \
37
+ --streaming \
38
+ --use_auth_token \
39
+ --push_to_hub
run_speech_recognition_seq2seq_streaming.py ADDED
@@ -0,0 +1,629 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for sequence to sequence speech recognition
18
+ with 🤗 Datasets' streaming mode.
19
+ """
20
+ # You can also adapt this script for your own sequence to sequence speech
21
+ # recognition task. Pointers for this are left as comments.
22
+
23
+ import logging
24
+ import os
25
+ import sys
26
+ from dataclasses import dataclass, field
27
+ from typing import Any, Dict, List, Optional, Union
28
+
29
+ import datasets
30
+ import torch
31
+ from datasets import DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset
32
+ from torch.utils.data import IterableDataset
33
+
34
+ import evaluate
35
+ import transformers
36
+ from transformers import (
37
+ AutoConfig,
38
+ AutoFeatureExtractor,
39
+ AutoModelForSpeechSeq2Seq,
40
+ AutoProcessor,
41
+ AutoTokenizer,
42
+ HfArgumentParser,
43
+ Seq2SeqTrainer,
44
+ Seq2SeqTrainingArguments,
45
+ TrainerCallback,
46
+ set_seed,
47
+ )
48
+ from transformers.models.whisper.english_normalizer import BasicTextNormalizer
49
+ from transformers.trainer_pt_utils import IterableDatasetShard
50
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
51
+ from transformers.utils import check_min_version, send_example_telemetry
52
+ from transformers.utils.versions import require_version
53
+
54
+
55
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
56
+ check_min_version("4.25.0.dev0")
57
+
58
+ require_version("datasets>=1.18.2", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
59
+
60
+ logger = logging.getLogger(__name__)
61
+
62
+
63
+ @dataclass
64
+ class ModelArguments:
65
+ """
66
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
67
+ """
68
+
69
+ model_name_or_path: str = field(
70
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
71
+ )
72
+ config_name: Optional[str] = field(
73
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
74
+ )
75
+ tokenizer_name: Optional[str] = field(
76
+ default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
77
+ )
78
+ feature_extractor_name: Optional[str] = field(
79
+ default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
80
+ )
81
+ cache_dir: Optional[str] = field(
82
+ default=None,
83
+ metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
84
+ )
85
+ use_fast_tokenizer: bool = field(
86
+ default=True,
87
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
88
+ )
89
+ model_revision: str = field(
90
+ default="main",
91
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
92
+ )
93
+ use_auth_token: bool = field(
94
+ default=False,
95
+ metadata={
96
+ "help": (
97
+ "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
98
+ "with private models)."
99
+ )
100
+ },
101
+ )
102
+ freeze_feature_encoder: bool = field(
103
+ default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
104
+ )
105
+ freeze_encoder: bool = field(
106
+ default=False, metadata={"help": "Whether to freeze the entire encoder of the seq2seq model."}
107
+ )
108
+ forced_decoder_ids: List[List[int]] = field(
109
+ default=None,
110
+ metadata={
111
+ "help": (
112
+ "A list of pairs of integers which indicates a mapping from generation indices to token indices "
113
+ "that will be forced before sampling. For example, [[0, 123]] means the first generated token "
114
+ "will always be a token of index 123."
115
+ )
116
+ },
117
+ )
118
+ suppress_tokens: List[int] = field(
119
+ default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
120
+ )
121
+ model_index_name: str = field(default=None, metadata={"help": "Pretty name for the model card."})
122
+
123
+
124
+ @dataclass
125
+ class DataTrainingArguments:
126
+ """
127
+ Arguments pertaining to what data we are going to input our model for training and eval.
128
+ """
129
+
130
+ dataset_name: str = field(
131
+ default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
132
+ )
133
+ dataset_config_name: Optional[str] = field(
134
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
135
+ )
136
+ text_column: Optional[str] = field(
137
+ default=None,
138
+ metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
139
+ )
140
+ max_train_samples: Optional[int] = field(
141
+ default=None,
142
+ metadata={
143
+ "help": (
144
+ "For debugging purposes or quicker training, truncate the number of training examples to this "
145
+ "value if set."
146
+ )
147
+ },
148
+ )
149
+ max_eval_samples: Optional[int] = field(
150
+ default=None,
151
+ metadata={
152
+ "help": (
153
+ "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
154
+ "value if set."
155
+ )
156
+ },
157
+ )
158
+ audio_column_name: str = field(
159
+ default="audio",
160
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
161
+ )
162
+ text_column_name: str = field(
163
+ default="text",
164
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
165
+ )
166
+ max_duration_in_seconds: float = field(
167
+ default=20.0,
168
+ metadata={
169
+ "help": (
170
+ "Truncate audio files that are longer than `max_duration_in_seconds` seconds to"
171
+ " 'max_duration_in_seconds`"
172
+ )
173
+ },
174
+ )
175
+ min_duration_in_seconds: float = field(
176
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
177
+ )
178
+ train_split_name: str = field(
179
+ default="train",
180
+ metadata={
181
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
182
+ },
183
+ )
184
+ eval_split_name: str = field(
185
+ default="test",
186
+ metadata={
187
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
188
+ },
189
+ )
190
+ do_lower_case: bool = field(
191
+ default=False,
192
+ metadata={"help": "Whether the target text should be lower cased."},
193
+ )
194
+ do_remove_punctuation: bool = field(
195
+ default=False,
196
+ metadata={"help": "Whether the target text should be striped of punctuation."},
197
+ )
198
+ do_normalize_eval: bool = field(
199
+ default=True,
200
+ metadata={"help": "Whether to normalise the references and predictions in the eval WER calculation."},
201
+ )
202
+ language: str = field(
203
+ default=None,
204
+ metadata={
205
+ "help": (
206
+ "Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning "
207
+ "only. For English speech recognition, it should be set to `None`."
208
+ )
209
+ },
210
+ )
211
+ task: str = field(
212
+ default="transcribe",
213
+ metadata={"help": "Task, either `transcribe` for speech recognition or `translate` for speech translation."},
214
+ )
215
+ shuffle_buffer_size: Optional[int] = field(
216
+ default=500,
217
+ metadata={
218
+ "help": (
219
+ "The number of streamed examples to download before shuffling them. The large the buffer, "
220
+ "the closer it is to real offline shuffling."
221
+ )
222
+ },
223
+ )
224
+ streaming: bool = field(
225
+ default=True,
226
+ metadata={"help": "Whether to use streaming mode to load and pre-process the data."},
227
+ )
228
+
229
+
230
+ @dataclass
231
+ class DataCollatorSpeechSeq2SeqWithPadding:
232
+ """
233
+ Data collator that will dynamically pad the inputs received.
234
+ Args:
235
+ processor ([`WhisperProcessor`])
236
+ The processor used for processing the data.
237
+ decoder_start_token_id (`int`)
238
+ The begin-of-sentence token id of the decoder.
239
+ """
240
+
241
+ processor: Any
242
+ decoder_start_token_id: int
243
+
244
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
245
+ # split inputs and labels since they have to be of different lengths and need
246
+ # different padding methods
247
+ model_input_name = self.processor.model_input_names[0]
248
+ input_features = [{model_input_name: feature[model_input_name]} for feature in features]
249
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
250
+
251
+ batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
252
+
253
+ labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
254
+
255
+ # replace padding with -100 to ignore loss correctly
256
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
257
+
258
+ # if bos token is appended in previous tokenization step,
259
+ # cut bos token here as it's appended later anyways
260
+ if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
261
+ labels = labels[:, 1:]
262
+
263
+ batch["labels"] = labels
264
+
265
+ return batch
266
+
267
+
268
+ def load_maybe_streaming_dataset(dataset_name, dataset_config_name, split="train", streaming=True, **kwargs):
269
+ """
270
+ Utility function to load a dataset in streaming mode. For datasets with multiple splits,
271
+ each split is loaded individually and then splits combined by taking alternating examples from
272
+ each (interleaving).
273
+ """
274
+ if "+" in split:
275
+ # load multiple splits separated by the `+` symbol with streaming mode
276
+ dataset_splits = [
277
+ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=streaming, **kwargs)
278
+ for split_name in split.split("+")
279
+ ]
280
+ # interleave multiple splits to form one dataset
281
+ interleaved_dataset = interleave_datasets(dataset_splits)
282
+ return interleaved_dataset
283
+ else:
284
+ # load a single split *with* streaming mode
285
+ dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=streaming, **kwargs)
286
+ return dataset
287
+
288
+
289
+ def main():
290
+ # 1. Parse input arguments
291
+ # See all possible arguments in src/transformers/training_args.py
292
+ # or by passing the --help flag to this script.
293
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
294
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
295
+
296
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
297
+ # If we pass only one argument to the script and it's the path to a json file,
298
+ # let's parse it to get our arguments.
299
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
300
+ else:
301
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
302
+
303
+ # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
304
+ # information sent is the one passed as arguments along with your Python/PyTorch versions.
305
+ send_example_telemetry("run_speech_recognition_seq2seq_streaming", model_args, data_args)
306
+
307
+ # 2. Setup logging
308
+ logging.basicConfig(
309
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
310
+ datefmt="%m/%d/%Y %H:%M:%S",
311
+ handlers=[logging.StreamHandler(sys.stdout)],
312
+ )
313
+ log_level = training_args.get_process_log_level()
314
+ logger.setLevel(log_level)
315
+ datasets.utils.logging.set_verbosity(log_level)
316
+ transformers.utils.logging.set_verbosity(log_level)
317
+ transformers.utils.logging.enable_default_handler()
318
+ transformers.utils.logging.enable_explicit_format()
319
+
320
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
321
+
322
+ # Log on each process the small summary:
323
+ logger.warning(
324
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
325
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
326
+ )
327
+ logger.info(f"Training/evaluation parameters {training_args}")
328
+
329
+ # Set the verbosity to info of the Transformers logger (on main process only):
330
+ if is_main_process(training_args.local_rank):
331
+ transformers.utils.logging.set_verbosity_info()
332
+ logger.info("Training/evaluation parameters %s", training_args)
333
+
334
+ # 3. Detecting last checkpoint and eventually continue from last checkpoint
335
+ last_checkpoint = None
336
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
337
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
338
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
339
+ raise ValueError(
340
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
341
+ "Use --overwrite_output_dir to overcome."
342
+ )
343
+ elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
344
+ logger.info(
345
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
346
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
347
+ )
348
+
349
+ # Set seed before initializing model.
350
+ set_seed(training_args.seed)
351
+
352
+ # 4. Load dataset
353
+ raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict()
354
+
355
+ if training_args.do_train:
356
+ raw_datasets["train"] = load_maybe_streaming_dataset(
357
+ data_args.dataset_name,
358
+ data_args.dataset_config_name,
359
+ split=data_args.train_split_name,
360
+ use_auth_token=True if model_args.use_auth_token else None,
361
+ streaming=data_args.streaming,
362
+ )
363
+
364
+ if training_args.do_eval:
365
+ raw_datasets["eval"] = load_maybe_streaming_dataset(
366
+ data_args.dataset_name,
367
+ data_args.dataset_config_name,
368
+ split=data_args.eval_split_name,
369
+ use_auth_token=True if model_args.use_auth_token else None,
370
+ streaming=data_args.streaming,
371
+ )
372
+
373
+ raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys())
374
+
375
+ if data_args.audio_column_name not in raw_datasets_features:
376
+ raise ValueError(
377
+ f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
378
+ "Make sure to set `--audio_column_name` to the correct audio column - one of "
379
+ f"{', '.join(raw_datasets_features)}."
380
+ )
381
+
382
+ if data_args.text_column_name not in raw_datasets_features:
383
+ raise ValueError(
384
+ f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
385
+ "Make sure to set `--text_column_name` to the correct text column - one of "
386
+ f"{', '.join(raw_datasets_features)}."
387
+ )
388
+
389
+ # 5. Load pretrained model, tokenizer, and feature extractor
390
+ #
391
+ # Distributed training:
392
+ # The .from_pretrained methods guarantee that only one local process can concurrently
+ # download model & vocab.
393
+ config = AutoConfig.from_pretrained(
394
+ model_args.config_name if model_args.config_name else model_args.model_name_or_path,
395
+ cache_dir=model_args.cache_dir,
396
+ revision=model_args.model_revision,
397
+ use_auth_token=True if model_args.use_auth_token else None,
398
+ )
399
+
400
+ config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
401
+
402
+ if training_args.gradient_checkpointing:
403
+ config.update({"use_cache": False})
404
+
405
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
406
+ model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
407
+ cache_dir=model_args.cache_dir,
408
+ revision=model_args.model_revision,
409
+ use_auth_token=True if model_args.use_auth_token else None,
410
+ )
411
+ tokenizer = AutoTokenizer.from_pretrained(
412
+ model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
413
+ cache_dir=model_args.cache_dir,
414
+ use_fast=model_args.use_fast_tokenizer,
415
+ revision=model_args.model_revision,
416
+ use_auth_token=True if model_args.use_auth_token else None,
417
+ )
418
+ model = AutoModelForSpeechSeq2Seq.from_pretrained(
419
+ model_args.model_name_or_path,
420
+ config=config,
421
+ cache_dir=model_args.cache_dir,
422
+ revision=model_args.model_revision,
423
+ use_auth_token=True if model_args.use_auth_token else None,
424
+ )
425
+
426
+ if model.config.decoder_start_token_id is None:
427
+ raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
428
+
429
+ if model_args.freeze_feature_encoder:
430
+ model.freeze_feature_encoder()
431
+
432
+ if model_args.freeze_encoder:
433
+ model.freeze_encoder()
434
+
435
+ if data_args.language is not None:
436
+ # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
437
+ tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
438
+
439
+ # 6. Resample speech dataset if necessary
440
+ dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
441
+ if dataset_sampling_rate != feature_extractor.sampling_rate:
442
+ raw_datasets = raw_datasets.cast_column(
443
+ data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
444
+ )
445
+
446
+ # 7. Preprocessing the datasets.
447
+ # We need to read the audio files as arrays and tokenize the targets.
448
+ max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
449
+ min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
450
+ audio_column_name = data_args.audio_column_name
451
+ text_column_name = data_args.text_column_name
452
+ model_input_name = feature_extractor.model_input_names[0]
453
+ do_lower_case = data_args.do_lower_case
454
+ do_remove_punctuation = data_args.do_remove_punctuation
455
+ normalizer = BasicTextNormalizer() # 'official' text normalizer from OpenAI
456
+
457
+ if data_args.max_train_samples is not None:
458
+ raw_datasets["train"] = (
459
+ raw_datasets["train"].take(data_args.max_train_samples)
460
+ if data_args.streaming
461
+ else raw_datasets["train"].select(range(data_args.max_train_samples))
462
+ )
463
+
464
+ if data_args.max_eval_samples is not None:
465
+ raw_datasets["eval"] = (
466
+ raw_datasets["eval"].take(data_args.max_eval_samples)
467
+ if data_args.streaming
468
+ else raw_datasets["eval"].select(range(data_args.max_eval_samples))
469
+ )
470
+
471
+ def prepare_dataset(batch):
472
+ # process audio
473
+ sample = batch[audio_column_name]
474
+ inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
475
+ # process audio length
476
+ batch[model_input_name] = inputs.get(model_input_name)[0]
477
+ batch["input_length"] = len(sample["array"])
478
+
479
+ # process targets
480
+ input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
481
+ if do_remove_punctuation:
482
+ input_str = normalizer(input_str).strip()
483
+ batch["labels"] = tokenizer(input_str).input_ids
484
+ return batch
485
+
486
+ with training_args.main_process_first(desc="dataset map pre-processing"):
487
+ vectorized_datasets = raw_datasets.map(
488
+ prepare_dataset,
489
+ remove_columns=raw_datasets_features,
490
+ ).with_format("torch")
491
+
492
+ if training_args.do_train and data_args.streaming:
493
+ # manually shuffle if streaming (done by the trainer for non-streaming)
494
+ vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(
495
+ buffer_size=data_args.shuffle_buffer_size,
496
+ seed=training_args.seed,
497
+ )
498
+
499
+ # filter training data that is shorter than min_input_length or longer than
500
+ # max_input_length
501
+ def is_audio_in_length_range(length):
502
+ return min_input_length < length < max_input_length
503
+
504
+ if training_args.do_train:
505
+ vectorized_datasets["train"] = vectorized_datasets["train"].filter(
506
+ is_audio_in_length_range,
507
+ input_columns=["input_length"],
508
+ )
509
+
510
+ # 8. Load Metric
511
+ metric = evaluate.load("wer")
512
+ do_normalize_eval = data_args.do_normalize_eval
513
+
514
+ def compute_metrics(pred):
515
+ pred_ids = pred.predictions
516
+
517
+ pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
518
+
519
+ pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
520
+ # we do not want to group tokens when computing the metrics
521
+ label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
522
+
523
+ if do_normalize_eval:
524
+ pred_str = [normalizer(pred) for pred in pred_str]
525
+ label_str = [normalizer(label) for label in label_str]
526
+ # filtering step to only evaluate the samples that correspond to non-zero references:
527
+ pred_str = [pred_str[i] for i in range(len(pred_str)) if len(label_str[i]) > 0]
528
+ label_str = [label_str[i] for i in range(len(label_str)) if len(label_str[i]) > 0]
529
+
530
+ wer = 100 * metric.compute(predictions=pred_str, references=label_str)
531
+
532
+ return {"wer": wer}
533
+
534
+ # 9. Create a single speech processor
535
+ if is_main_process(training_args.local_rank):
536
+ # save feature extractor, tokenizer and config
537
+ feature_extractor.save_pretrained(training_args.output_dir)
538
+ tokenizer.save_pretrained(training_args.output_dir)
539
+ config.save_pretrained(training_args.output_dir)
540
+
541
+ processor = AutoProcessor.from_pretrained(training_args.output_dir)
542
+
543
+ # 10. Define data collator
544
+ data_collator = DataCollatorSpeechSeq2SeqWithPadding(
545
+ processor=processor,
546
+ decoder_start_token_id=model.config.decoder_start_token_id,
547
+ )
548
+
549
+ # 11. Configure Trainer
550
+ # Trainer callback to reinitialise and reshuffle the streamable datasets at the beginning of each epoch
551
+ # Only required for streaming: Trainer automatically shuffles non-streaming datasets
552
+ class ShuffleCallback(TrainerCallback):
553
+ def on_epoch_begin(self, args, state, control, train_dataloader, **kwargs):
554
+ if isinstance(train_dataloader.dataset, IterableDatasetShard):
555
+ pass # set_epoch() is handled by the Trainer
556
+ elif isinstance(train_dataloader.dataset, IterableDataset):
557
+ train_dataloader.dataset.set_epoch(train_dataloader.dataset._epoch + 1)
558
+
559
+ # Initialize Trainer
560
+ trainer = Seq2SeqTrainer(
561
+ model=model,
562
+ args=training_args,
563
+ train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
564
+ eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
565
+ tokenizer=feature_extractor,
566
+ data_collator=data_collator,
567
+ compute_metrics=compute_metrics if training_args.predict_with_generate else None,
568
+ callbacks=[ShuffleCallback()] if data_args.streaming else None,
569
+ )
570
+
571
+ # 12. Training
572
+ if training_args.do_train:
573
+ checkpoint = None
574
+ if training_args.resume_from_checkpoint is not None:
575
+ checkpoint = training_args.resume_from_checkpoint
576
+ elif last_checkpoint is not None:
577
+ checkpoint = last_checkpoint
578
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
579
+ trainer.save_model() # Saves the feature extractor too for easy upload
580
+
581
+ metrics = train_result.metrics
582
+ if data_args.max_train_samples:
583
+ metrics["train_samples"] = data_args.max_train_samples
584
+ trainer.log_metrics("train", metrics)
585
+ trainer.save_metrics("train", metrics)
586
+ trainer.save_state()
587
+
588
+ # 13. Evaluation
589
+ results = {}
590
+ if training_args.do_eval:
591
+ logger.info("*** Evaluate ***")
592
+ metrics = trainer.evaluate(
593
+ metric_key_prefix="eval",
594
+ max_length=training_args.generation_max_length,
595
+ num_beams=training_args.generation_num_beams,
596
+ )
597
+ if data_args.max_eval_samples:
598
+ metrics["eval_samples"] = data_args.max_eval_samples
599
+
600
+ trainer.log_metrics("eval", metrics)
601
+ trainer.save_metrics("eval", metrics)
602
+
603
+ # 14. Write Training Stats
604
+ kwargs = {
605
+ "finetuned_from": model_args.model_name_or_path,
606
+ "tasks": "automatic-speech-recognition",
607
+ "tags": "whisper-event",
608
+ }
609
+ if data_args.dataset_name is not None:
610
+ kwargs["dataset_tags"] = data_args.dataset_name
611
+ if data_args.dataset_config_name is not None:
612
+ kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
613
+ else:
614
+ kwargs["dataset"] = data_args.dataset_name
615
+ if "common_voice" in data_args.dataset_name:
616
+ kwargs["language"] = data_args.dataset_config_name[:2]
617
+ if model_args.model_index_name is not None:
618
+ kwargs["model_name"] = model_args.model_index_name
619
+
620
+ if training_args.push_to_hub:
621
+ trainer.push_to_hub(**kwargs)
622
+ else:
623
+ trainer.create_model_card(**kwargs)
624
+
625
+ return results
626
+
627
+
628
+ if __name__ == "__main__":
629
+ main()
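A quick usage sketch (not part of this commit): once a checkpoint such as ./checkpoint-1000 above is saved or pushed, it can be tried out for Romanian transcription with the transformers pipeline. The model path and the audio file name below are placeholder assumptions.

from transformers import pipeline

# Load the fine-tuned Whisper checkpoint; "./" stands in for this repo or its pushed Hub id.
asr = pipeline(
    "automatic-speech-recognition",
    model="./",
    chunk_length_s=30,  # matches --max_duration_in_seconds="30" used during training
)

# Force Romanian transcription, mirroring --language="romanian" and the default transcribe task.
asr.model.config.forced_decoder_ids = asr.tokenizer.get_decoder_prompt_ids(
    language="ro", task="transcribe"
)

print(asr("sample.wav")["text"])  # "sample.wav" is a placeholder audio file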
runs/Dec18_08-41-04_fe2747a042f0/1671381730.2013636/events.out.tfevents.1671381730.fe2747a042f0.46148.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3968d16c222675f4f8d35641fe6eb4fe3d4394ff49e9518e75090b9347953dd8
3
+ size 5878
runs/Dec18_08-41-04_fe2747a042f0/events.out.tfevents.1671381730.fe2747a042f0.46148.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:648235cf38bdc7d1daa38bae30699b9ba2dfe43a5b53cc2bc710c8ed357c6f54
3
+ size 10859
special_tokens_map.json ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|endoftext|>",
4
+ "<|startoftranscript|>",
5
+ "<|en|>",
6
+ "<|zh|>",
7
+ "<|de|>",
8
+ "<|es|>",
9
+ "<|ru|>",
10
+ "<|ko|>",
11
+ "<|fr|>",
12
+ "<|ja|>",
13
+ "<|pt|>",
14
+ "<|tr|>",
15
+ "<|pl|>",
16
+ "<|ca|>",
17
+ "<|nl|>",
18
+ "<|ar|>",
19
+ "<|sv|>",
20
+ "<|it|>",
21
+ "<|id|>",
22
+ "<|hi|>",
23
+ "<|fi|>",
24
+ "<|vi|>",
25
+ "<|iw|>",
26
+ "<|uk|>",
27
+ "<|el|>",
28
+ "<|ms|>",
29
+ "<|cs|>",
30
+ "<|ro|>",
31
+ "<|da|>",
32
+ "<|hu|>",
33
+ "<|ta|>",
34
+ "<|no|>",
35
+ "<|th|>",
36
+ "<|ur|>",
37
+ "<|hr|>",
38
+ "<|bg|>",
39
+ "<|lt|>",
40
+ "<|la|>",
41
+ "<|mi|>",
42
+ "<|ml|>",
43
+ "<|cy|>",
44
+ "<|sk|>",
45
+ "<|te|>",
46
+ "<|fa|>",
47
+ "<|lv|>",
48
+ "<|bn|>",
49
+ "<|sr|>",
50
+ "<|az|>",
51
+ "<|sl|>",
52
+ "<|kn|>",
53
+ "<|et|>",
54
+ "<|mk|>",
55
+ "<|br|>",
56
+ "<|eu|>",
57
+ "<|is|>",
58
+ "<|hy|>",
59
+ "<|ne|>",
60
+ "<|mn|>",
61
+ "<|bs|>",
62
+ "<|kk|>",
63
+ "<|sq|>",
64
+ "<|sw|>",
65
+ "<|gl|>",
66
+ "<|mr|>",
67
+ "<|pa|>",
68
+ "<|si|>",
69
+ "<|km|>",
70
+ "<|sn|>",
71
+ "<|yo|>",
72
+ "<|so|>",
73
+ "<|af|>",
74
+ "<|oc|>",
75
+ "<|ka|>",
76
+ "<|be|>",
77
+ "<|tg|>",
78
+ "<|sd|>",
79
+ "<|gu|>",
80
+ "<|am|>",
81
+ "<|yi|>",
82
+ "<|lo|>",
83
+ "<|uz|>",
84
+ "<|fo|>",
85
+ "<|ht|>",
86
+ "<|ps|>",
87
+ "<|tk|>",
88
+ "<|nn|>",
89
+ "<|mt|>",
90
+ "<|sa|>",
91
+ "<|lb|>",
92
+ "<|my|>",
93
+ "<|bo|>",
94
+ "<|tl|>",
95
+ "<|mg|>",
96
+ "<|as|>",
97
+ "<|tt|>",
98
+ "<|haw|>",
99
+ "<|ln|>",
100
+ "<|ha|>",
101
+ "<|ba|>",
102
+ "<|jw|>",
103
+ "<|su|>",
104
+ "<|translate|>",
105
+ "<|transcribe|>",
106
+ "<|startoflm|>",
107
+ "<|startofprev|>",
108
+ "<|nocaptions|>",
109
+ "<|notimestamps|>"
110
+ ],
111
+ "bos_token": {
112
+ "content": "<|endoftext|>",
113
+ "lstrip": false,
114
+ "normalized": true,
115
+ "rstrip": false,
116
+ "single_word": false
117
+ },
118
+ "eos_token": {
119
+ "content": "<|endoftext|>",
120
+ "lstrip": false,
121
+ "normalized": true,
122
+ "rstrip": false,
123
+ "single_word": false
124
+ },
125
+ "pad_token": "<|endoftext|>",
126
+ "unk_token": {
127
+ "content": "",
128
+ "lstrip": false,
129
+ "normalized": true,
130
+ "rstrip": false,
131
+ "single_word": false
132
+ }
133
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "eos_token": {
13
+ "__type": "AddedToken",
14
+ "content": "<|endoftext|>",
15
+ "lstrip": false,
16
+ "normalized": true,
17
+ "rstrip": false,
18
+ "single_word": false
19
+ },
20
+ "errors": "replace",
21
+ "model_max_length": 1024,
22
+ "name_or_path": "openai/whisper-small",
23
+ "pad_token": null,
24
+ "processor_class": "WhisperProcessor",
25
+ "return_attention_mask": false,
26
+ "special_tokens_map_file": null,
27
+ "tokenizer_class": "WhisperTokenizer",
28
+ "unk_token": {
29
+ "__type": "AddedToken",
30
+ "content": "",
31
+ "lstrip": false,
32
+ "normalized": true,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ }
36
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:60005fd531d7ed5584665e9b5f48c71e53c0c0f68bb1e517baa8da557dd1433a
3
+ size 4731
vocab.json ADDED
The diff for this file is too large to render. See raw diff