sharkMeow committed on
Commit d76da5b
1 Parent(s): 9184a1a

Training in progress, step 500

Files changed (4)
  1. config.json +34 -0
  2. config.txt +19 -0
  3. model.safetensors +3 -0
  4. training_args.bin +3 -0
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "OFA-Sys/chinese-clip-vit-base-patch16",
+   "architectures": [
+     "ChineseCLIPImageDualModel"
+   ],
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "chinese_clip",
+   "projection_dim": 512,
+   "text_config": {
+     "architectures": [
+       "ChineseCLIPTextModel"
+     ],
+     "bos_token_id": 0,
+     "directionality": "bidi",
+     "eos_token_id": 2,
+     "model_type": "chinese_clip_text_model",
+     "output_past": true,
+     "pooler_fc_size": 768,
+     "pooler_num_attention_heads": 12,
+     "pooler_num_fc_layers": 3,
+     "pooler_size_per_head": 128,
+     "pooler_type": "first_token_transform",
+     "vocab_size": 21128
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.42.3",
+   "vision_config": {
+     "dropout": 0.0,
+     "model_type": "chinese_clip_vision_model",
+     "patch_size": 16
+   }
+ }
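This config can be reloaded through the standard transformers API. A minimal sketch, assuming the checkpoint has been downloaded locally (the directory name below is hypothetical); the listed architecture, ChineseCLIPImageDualModel, appears to be a project-specific class rather than a stock transformers model, so only the config is loaded here:

    from transformers import AutoConfig

    # model_type "chinese_clip" resolves to ChineseCLIPConfig
    config = AutoConfig.from_pretrained("./checkpoint-500")  # hypothetical local path to this checkpoint
    print(config.projection_dim)           # 512
    print(config.text_config.vocab_size)   # 21128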
config.txt ADDED
@@ -0,0 +1,19 @@
+ python clip_aoi_train.py \
+     --output_dir ./aoi_clip_high_resolution_crossAttenttionFusion_fusin_gpt_random_sampler \
+     --model_name_or_path OFA-Sys/chinese-clip-vit-base-patch16 \
+     --image_processor_name OFA-Sys/chinese-clip-vit-base-patch16 \
+     --tokenizer_name OFA-Sys/chinese-clip-vit-base-patch16 \
+     --train_file ./data_csv/AOI/train_high_resolution_gpt.csv \
+     --validation_file ./data_csv/AOI/valid_high_resolution_gpt.csv \
+     --image_column image_path --caption_column text --aoi_caption_column aoi_text \
+     --remove_unused_columns=False --do_eval --do_train \
+     --per_device_train_batch_size=40 --per_device_eval_batch_size=20 \
+     --gradient_accumulation_steps=10 --overwrite_output_dir \
+     --num_train_epochs=200 \
+     --learning_rate=1e-5 --warmup_steps=0 --weight_decay 0.1 \
+     --save_total_limit 5 \
+     --eval_strategy steps --logging_steps 0.1 --report_to wandb --fp16 --push_to_hub --batch_eval_metrics \
+     --max_seq_length 153 --aoi_max_seq_length 384
+ # --model_name_or_path OFA-Sys/chinese-clip-vit-base-patch16
+ # --overwrite_output_dir \
+ # aoi_clip_high_resolution_concate_fusin_gpt_random_sampler
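For reference, with --per_device_train_batch_size=40 and --gradient_accumulation_steps=10, the effective batch size per optimizer step is 40 × 10 = 400 examples per device (multiplied further by the number of devices when training on several GPUs). The fractional --logging_steps 0.1 is interpreted by the Trainer as a ratio of the total number of training steps rather than a fixed step count.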
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:536a190a6161a2aaf2741479c1dd845eead0e300cec45f1ab349f4c1a9b83f31
+ size 1164561480
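model.safetensors is stored through Git LFS; the pointer above records only the object hash and size (about 1.16 GB). Once the actual file has been fetched, the weights can be inspected with the safetensors library; a minimal sketch:

    from safetensors.torch import load_file

    # assumes the LFS object has been pulled, so model.safetensors is the real file rather than the pointer
    state_dict = load_file("model.safetensors")
    print(len(state_dict), "tensors")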
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a65c5cfb22fcd570dd5cbf7c58678cc789f306da61bd04f94281acec7b3825ea
+ size 5240
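training_args.bin is the pickled TrainingArguments object that the Trainer saves alongside the checkpoint. A minimal sketch for inspecting it (it is an arbitrary pickle, so pass weights_only=False on recent PyTorch versions and only load files you trust):

    import torch

    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.num_train_epochs)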