# clip-spanish / run-clip.sh
# Author: edugp
# Commit 04899df: "Switch to BERTIN model for training script and testing on image"
# (Header reconstructed from HuggingFace page chrome: raw / history / blame / contribute / delete, 815 Bytes)
# Train a hybrid CLIP model: BERTIN Spanish RoBERTa as the text encoder paired
# with OpenAI's CLIP ViT-B/32 as the vision encoder, on the scale-converted
# WIT dataset (98/1/1 train/valid/test split — presumably; verify against the
# dataset-preparation script that produced these JSON files).
#
# -e: abort on any failing command; -u: error on unset variables;
# -o pipefail: a pipeline fails if any stage fails.
set -euo pipefail

# Fail fast with a clear message instead of silently resolving the dataset
# paths below to /home//data/... when USER is not exported.
: "${USER:?USER must be set: dataset paths are resolved under /home/\$USER}"

python run_hybrid_clip.py \
  --output_dir="./output_141230_training_examples" \
  --text_model_name_or_path="bertin-project/bertin-roberta-base-spanish" \
  --vision_model_name_or_path="openai/clip-vit-base-patch32" \
  --tokenizer_name="bertin-project/bertin-roberta-base-spanish" \
  --train_file="/home/${USER}/data/wit_scale_converted/train_dataset_scale_converted_98_1_1_split.json" \
  --validation_file="/home/${USER}/data/wit_scale_converted/valid_dataset_scale_converted_98_1_1_split.json" \
  --do_train \
  --do_eval \
  --num_train_epochs="40" \
  --max_seq_length="96" \
  --per_device_train_batch_size="64" \
  --per_device_eval_batch_size="64" \
  --learning_rate="5e-5" \
  --warmup_steps="0" \
  --weight_decay="0.1" \
  --overwrite_output_dir \
  --preprocessing_num_workers="32"