clip-spanish / run-clip.sh
edugp's picture
Add training scripts and initial model trained on 1% of the data.
8e2b754
raw
history blame
801 Bytes
#!/usr/bin/env bash
# Launch hybrid CLIP training: Spanish BERT text encoder + OpenAI CLIP ViT
# vision encoder, on the prepared WIT dataset.
#
# Requires: run_hybrid_clip.py in the CWD, and the prepared train/valid JSON
# files under /home/$USER/data/wit/prepared_dataset/.
set -euo pipefail

# Read the HF Hub token if present and export it so the training process can
# use it (needed only when --push_to_hub below is re-enabled). The guard keeps
# a missing token file from aborting a local run under `set -e`.
if [[ -f "${HOME}/.huggingface/token" ]]; then
  HUB_TOKEN=$(cat "${HOME}/.huggingface/token")
  export HUB_TOKEN
fi

python run_hybrid_clip.py \
  --output_dir "./output_dir" \
  --text_model_name_or_path="dccuchile/bert-base-spanish-wwm-cased" \
  --vision_model_name_or_path="openai/clip-vit-base-patch32" \
  --tokenizer_name="dccuchile/bert-base-spanish-wwm-cased" \
  --train_file="/home/${USER}/data/wit/prepared_dataset/train_dataset_filtered.json" \
  --validation_file="/home/${USER}/data/wit/prepared_dataset/valid_dataset_filtered.json" \
  --do_train --do_eval \
  --num_train_epochs="40" \
  --max_seq_length 96 \
  --per_device_train_batch_size="64" \
  --per_device_eval_batch_size="64" \
  --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \
  --overwrite_output_dir \
  --preprocessing_num_workers 32
  #--push_to_hub