Training in progress, step 100
- .ipynb_checkpoints/run-checkpoint.sh +2 -2
- pytorch_model.bin +1 -1
- run.sh +2 -2
- special_tokens_map.json +1 -1
- training_args.bin +1 -1
.ipynb_checkpoints/run-checkpoint.sh
CHANGED
@@ -7,12 +7,12 @@ python run_speech_recognition_ctc.py \
 --max_steps="1000" \
 --per_device_train_batch_size="16" \
 --learning_rate="3e-4" \
---save_total_limit="
+--save_total_limit="10" \
 --evaluation_strategy="steps" \
 --text_column_name="sentence" \
 --length_column_name="input_length" \
 --chars_to_ignore ,\?\.\!\-\;\:\"\โ\%\โ\โ\๏ฟฝ\ู\ู\ูู,ู\
---save_steps="
+--save_steps="100" \
 --layerdrop="0.0" \
 --freeze_feature_encoder \
 --gradient_checkpointing \
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:38184d7f79d549af3476118077e93ead8218cb665f2d15d239f2cc0611365b5c
 size 1262149169
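If you want to confirm that a locally fetched copy of the weights matches this pointer, a minimal Python sketch is shown below; it assumes pytorch_model.bin has already been materialized locally (e.g. via git lfs pull), and the expected hash and size are taken from the pointer in this commit.

    import hashlib
    import os

    # Expected values copied from the LFS pointer committed here.
    EXPECTED_SHA256 = "38184d7f79d549af3476118077e93ead8218cb665f2d15d239f2cc0611365b5c"
    EXPECTED_SIZE = 1262149169

    path = "pytorch_model.bin"  # assumes the file was pulled with `git lfs pull`

    # Hash in chunks so the ~1.2 GB checkpoint is never read into memory at once.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    print("size ok:", os.path.getsize(path) == EXPECTED_SIZE)
    print("sha256 ok:", digest.hexdigest() == EXPECTED_SHA256)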
run.sh
CHANGED
@@ -7,12 +7,12 @@ python run_speech_recognition_ctc.py \
 --max_steps="1000" \
 --per_device_train_batch_size="16" \
 --learning_rate="3e-4" \
---save_total_limit="
+--save_total_limit="10" \
 --evaluation_strategy="steps" \
 --text_column_name="sentence" \
 --length_column_name="input_length" \
 --chars_to_ignore ,\?\.\!\-\;\:\"\โ\%\โ\โ\๏ฟฝ\ู\ู\ูู,ู\
---save_steps="
+--save_steps="100" \
 --layerdrop="0.0" \
 --freeze_feature_encoder \
 --gradient_checkpointing \
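This is the same two-line change as in the .ipynb_checkpoints copy above: checkpointing is now set to --save_steps="100" with --save_total_limit="10", i.e. a checkpoint every 100 steps, keeping at most the ten most recent (the previous values are truncated in this view). As a rough Python equivalent, the flags touched by this hunk map onto transformers.TrainingArguments as in the sketch below; output_dir is a hypothetical placeholder, not a value taken from this repo.

    from transformers import TrainingArguments

    training_args = TrainingArguments(
        output_dir="./wav2vec2-output",  # placeholder, not the repo's actual value
        max_steps=1000,
        per_device_train_batch_size=16,
        learning_rate=3e-4,
        evaluation_strategy="steps",   # newer transformers versions call this eval_strategy
        save_steps=100,                # write a checkpoint every 100 optimizer steps
        save_total_limit=10,           # keep only the 10 most recent checkpoints
        gradient_checkpointing=True,   # trade extra compute for lower activation memory
    )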
special_tokens_map.json
CHANGED
@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
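The only difference between the two versions is that additional_special_tokens grows from four entries to six: one more <s>/</s> pair is appended, most likely because the BOS/EOS tokens are re-registered each time the tokenizer is saved. A small sketch for inspecting the committed map (pure JSON, no tokenizer class assumed):

    import json
    from collections import Counter

    # Load the committed map and count the extra special tokens.
    with open("special_tokens_map.json") as f:
        special_map = json.load(f)

    extra = [tok["content"] for tok in special_map["additional_special_tokens"]]
    print(len(extra), Counter(extra))  # after this commit: 6 Counter({'<s>': 3, '</s>': 3})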
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f0e764bedbce34050e8790d7b53d28adc314fc2eec7012bab818227199ae9348
 size 2991
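training_args.bin is the pickled TrainingArguments object that Trainer stores next to the weights, so this pointer update simply reflects the new save settings. A minimal sketch for inspecting it locally (assumes a compatible transformers install, since unpickling reconstructs that class):

    import torch

    # Unpickle the TrainingArguments saved by Trainer; on newer torch releases you
    # may need torch.load(..., weights_only=False) to allow non-tensor objects.
    args = torch.load("training_args.bin")
    print(args.save_steps, args.save_total_limit)  # expected after this commit: 100 10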