# Hyperparameters follow RoBERTa: https://arxiv.org/pdf/1907.11692.pdf
./run_mlm_flax.py \
--model_name_or_path="bert-base-multilingual-cased" \
--output_dir="./" \
--model_type="bert" \
--config_name="./" \
--validation_file="./datasets/pulpo_lines_clean_val.json" \
--train_file="./datasets/pulpo_lines_clean_train.json" \
--tokenizer_name="bert-base-multilingual-cased" \
--max_seq_length="32" \
--pad_to_max_length \
--per_device_train_batch_size="256" \
--per_device_eval_batch_size="256" \
--adam_beta1="0.9" \
--adam_beta2="0.98" \
--adam_epsilon="1e-6" \
--learning_rate="1.25e-4" \
--weight_decay="0.01" \
--save_strategy="steps" \
--save_steps="1000" \
--save_total_limit="1000" \
--warmup_steps="10000" \
--num_train_epochs="40" \
--overwrite_output_dir \
--eval_steps="1000" \
--logging_steps="500" 2>&1 | tee run.log
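
Because `--config_name="./"` points at the working directory, a `config.json` has to exist there before the script is launched. A minimal sketch of one way to create it, assuming the architecture should simply mirror `bert-base-multilingual-cased` (the same checkpoint used for `--model_name_or_path` and `--tokenizer_name`):

```python
# Sketch: write ./config.json so that --config_name="./" resolves.
# Assumption: the config is copied unchanged from bert-base-multilingual-cased.
from transformers import BertConfig

config = BertConfig.from_pretrained("bert-base-multilingual-cased")
config.save_pretrained("./")  # writes ./config.json
```

After training, the Flax checkpoint written to `--output_dir` can be loaded back with `FlaxBertForMaskedLM.from_pretrained("./")` for evaluation or further fine-tuning.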