Ross Wightman committed
Commit 79507b4 · Parent(s): 9aa32d3

Add weights and args

Files changed:
- pytorch_model.bin (+3, -0)
- train_args.yaml (+125, -0)
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e78889dea9201c579488bae25db2573f85202ed0e5a4a98dcabb636edddc736
+size 1216863239
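
The lines above are a Git LFS pointer for a ~1.2 GB PyTorch state dict, not the weights themselves. A minimal sketch of pulling the real file and loading it into the matching timm architecture; the repo_id is a placeholder for this repository, and the architecture name and num_classes are taken from train_args.yaml below:

import timm
from huggingface_hub import hf_hub_download

# Resolve the LFS pointer to the actual weights file (repo_id is a placeholder).
ckpt_path = hf_hub_download(repo_id="<this-repo-id>", filename="pytorch_model.bin")

# Architecture and head size follow the `model` / `num_classes` entries in train_args.yaml.
model = timm.create_model(
    "vit_large_patch14_clip_224",
    num_classes=1000,
    checkpoint_path=ckpt_path,  # load the downloaded state dict into the model
)
model.eval()
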
train_args.yaml ADDED
@@ -0,0 +1,125 @@
+aa: rand-m9-n3-inc1-mstd101
+amp: true
+amp_dtype: float16
+amp_impl: native
+aot_autograd: false
+aug_repeats: 0
+aug_splits: 0
+batch_size: 64
+bce_loss: false
+bce_target_thresh: null
+bn_eps: null
+bn_momentum: null
+channels_last: false
+checkpoint_hist: 10
+class_map: ''
+clip_grad: 3.0
+clip_mode: norm
+color_jitter: 0.4
+cooldown_epochs: 0
+crop_pct: null
+cutmix: 0.0
+cutmix_minmax: null
+data_dir: /data/tfds
+dataset: tfds/imagenet2012
+dataset_download: false
+decay_epochs: 90
+decay_milestones:
+- 90
+- 180
+- 270
+decay_rate: 0.1
+dist_bn: reduce
+drop: 0.0
+drop_block: null
+drop_connect: null
+drop_path: 0.2
+epoch_repeats: 0.0
+epochs: 50
+eval_metric: top1
+experiment: ''
+fast_norm: false
+fuser: ''
+gp: null
+grad_checkpointing: false
+hflip: 0.5
+img_size: null
+in_chans: null
+initial_checkpoint: ''
+input_size: null
+interpolation: ''
+jsd_loss: false
+layer_decay: 0.8
+local_rank: 0
+log_interval: 50
+log_wandb: false
+lr: 6.0e-05
+lr_base: 0.1
+lr_base_scale: ''
+lr_base_size: 256
+lr_cycle_decay: 0.5
+lr_cycle_limit: 1
+lr_cycle_mul: 1.0
+lr_k_decay: 1.0
+lr_noise: null
+lr_noise_pct: 0.67
+lr_noise_std: 1.0
+mean: null
+min_lr: 5.0e-07
+mixup: 0.0
+mixup_mode: batch
+mixup_off_epoch: 0
+mixup_prob: 1.0
+mixup_switch_prob: 0.5
+model: vit_large_patch14_clip_224.openai_ft_in12k
+model_ema: true
+model_ema_decay: 0.9997
+model_ema_force_cpu: false
+momentum: 0.9
+no_aug: false
+no_ddp_bb: false
+no_prefetcher: false
+no_resume_opt: false
+num_classes: 1000
+opt: adamw
+opt_betas: null
+opt_eps: null
+output: ''
+patience_epochs: 10
+pin_mem: false
+pretrained: true
+ratio:
+- 0.75
+- 1.3333333333333333
+recount: 1
+recovery_interval: 0
+remode: pixel
+reprob: 0.3
+resplit: false
+resume: ''
+save_images: false
+scale:
+- 0.08
+- 1.0
+sched: cosine
+sched_on_updates: false
+seed: 42
+smoothing: 0.1
+split_bn: false
+start_epoch: null
+std: null
+sync_bn: false
+torchscript: false
+train_interpolation: random
+train_split: train
+tta: 0
+use_multi_epochs_loader: false
+val_split: validation
+validation_batch_size: null
+vflip: 0.0
+warmup_epochs: 10
+warmup_lr: 1.0e-06
+warmup_prefix: false
+weight_decay: 0.01
+worker_seeding: all
+workers: 6
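
The file records the hyper-parameters for this run: fine-tuning vit_large_patch14_clip_224.openai_ft_in12k on ImageNet-1k for 50 epochs with AdamW (lr 6e-5, weight decay 0.01, layer-wise LR decay 0.8), a cosine schedule with 10 warmup epochs, RandAugment, random erasing at 0.3, and model EMA. A minimal sketch of reading the values back into a namespace for inspection or reproduction, assuming PyYAML and a local copy of the file:

import argparse
import yaml

# Parse the recorded arguments back into an attribute-style namespace.
with open("train_args.yaml") as f:
    cfg = yaml.safe_load(f)
args = argparse.Namespace(**cfg)

print(args.model)                              # vit_large_patch14_clip_224.openai_ft_in12k
print(args.lr, args.epochs, args.layer_decay)  # 6e-05 50 0.8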