zhl2020 committed on
Commit a098611
1 Parent(s): 4c8cce9

Training in progress, epoch 0

config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "architectures": [
+     "MyModelForImageClassification"
+   ],
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "chinese_clip",
+   "projection_dim": 512,
+   "text_config": {
+     "model_type": "chinese_clip_text_model"
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.32.0",
+   "vision_config": {
+     "model_type": "chinese_clip_vision_model"
+   }
+ }
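
The config above declares a `chinese_clip` model with a 512-dimensional projection head and a custom `MyModelForImageClassification` head class. For reference, a minimal sketch of loading and inspecting it with the `transformers` version recorded in the file (4.32.0); the local path is an assumption and nothing below is part of the commit:

```python
from transformers import ChineseCLIPConfig

# Parse the config.json added in this commit (local path is an assumption).
config = ChineseCLIPConfig.from_json_file("config.json")

print(config.model_type)       # chinese_clip
print(config.projection_dim)   # 512
print(config.architectures)    # ['MyModelForImageClassification'] (custom class)
```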
preprocessor_config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": false,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "ChineseCLIPImageProcessor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
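
These preprocessor settings use the standard CLIP normalization statistics, resize inputs to 224×224, and skip center cropping. A minimal sketch of applying them with `ChineseCLIPImageProcessor`, assuming the file sits in the working directory and using a hypothetical input image:

```python
from PIL import Image
from transformers import ChineseCLIPImageProcessor

# Load the settings from the preprocessor_config.json added in this commit.
processor = ChineseCLIPImageProcessor.from_pretrained(".")

image = Image.open("example.jpg")    # hypothetical input image
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])
```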
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8297cd348d49e902d064a67cb81d05f275b30954c578375d8169d0134a937ae8
+ size 1222945469
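
What git tracks here is only the LFS pointer; the actual ~1.2 GB checkpoint is fetched by `git lfs pull` or the `huggingface_hub` client. Once the real file is present, the raw state dict can be inspected without instantiating the custom model class; a sketch, with the local path assumed:

```python
import torch

# Assumes the real weight file has been fetched via Git LFS (the text above is
# only the pointer). map_location="cpu" avoids needing a GPU just to inspect it.
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
print(len(state_dict), "tensors, e.g.", next(iter(state_dict)))
```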
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7ed838464284d1c73b24c3e54b70dedcbbbc222b2a6623bedd24ee56f5b753c
+ size 4155
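
Unlike the weights, training_args.bin is a pickled `transformers.TrainingArguments` object, so a compatible `transformers` install must be importable when unpickling it. A sketch, assuming the file has been fetched via Git LFS; on recent PyTorch, `weights_only=False` is needed to unpickle arbitrary Python objects:

```python
import torch

# Unpickle the TrainingArguments saved by the Trainer; the attributes printed
# below are standard TrainingArguments fields, not values confirmed by this commit.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
```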