kmpartner committed
Commit 0158cc1
1 parent: 8fc92d2

Upload folder using huggingface_hub
controlnet/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1420180f584edeb3aa28c9dab7d4bef4317b71bfbe7e3c8cd5d977708fe7f507
-size 1445157120
+oid sha256:8199ce735834688ef40e758d7e7086b69361da4bb41dbcbbdd90b0172fcae69a
+size 722598648
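
The new pointer is roughly half the size of the old one (722,598,648 vs. 1,445,157,120 bytes), consistent with the ControlNet weights being re-saved in fp16. Since an LFS pointer carries the blob's sha256 oid and byte size, a downloaded file can be verified against it; a minimal Python sketch, assuming the blob has already been fetched to the matching local path:

import hashlib
import os

def verify_lfs_pointer(path, expected_oid, expected_size):
    # Compare a downloaded blob against the oid/size fields of its LFS pointer.
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values from the new pointer above.
verify_lfs_pointer(
    "controlnet/diffusion_pytorch_model.safetensors",
    "8199ce735834688ef40e758d7e7086b69361da4bb41dbcbbdd90b0172fcae69a",
    722598648,
)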
feature_extractor/preprocessor_config.json CHANGED
@@ -1,21 +1,4 @@
 {
-  "_valid_processor_keys": [
-    "images",
-    "do_resize",
-    "size",
-    "resample",
-    "do_center_crop",
-    "crop_size",
-    "do_rescale",
-    "rescale_factor",
-    "do_normalize",
-    "image_mean",
-    "image_std",
-    "do_convert_rgb",
-    "return_tensors",
-    "data_format",
-    "input_data_format"
-  ],
   "crop_size": {
     "height": 224,
     "width": 224
@@ -25,6 +8,7 @@
   "do_normalize": true,
   "do_rescale": true,
   "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
   "image_mean": [
     0.48145466,
     0.4578275,
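
The internal _valid_processor_keys bookkeeping list is dropped and an explicit feature_extractor_type is recorded; loading the processor is unaffected. A minimal sketch, where <repo_id> is a placeholder for this repository's Hub id (not stated in the diff):

from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("<repo_id>", subfolder="feature_extractor")
print(processor.crop_size)  # {'height': 224, 'width': 224}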
logs/text2image-fine-tune/1717821007.8383396/events.out.tfevents.1717821007.0f914b078d44.4636.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:319a1493b00763d4a312319e60271fc5191530e75b74d632d823ec2b49f98d2f
+size 2436
logs/text2image-fine-tune/1717821007.8419707/hparams.yml ADDED
@@ -0,0 +1,47 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+adam_epsilon: 1.0e-08
+adam_weight_decay: 0.01
+allow_tf32: false
+cache_dir: null
+center_crop: true
+checkpointing_steps: 5
+checkpoints_total_limit: null
+dataloader_num_workers: 0
+dataset_config_name: null
+enable_xformers_memory_efficient_attention: false
+gradient_accumulation_steps: 4
+gradient_checkpointing: true
+lambda_kd_feat: 1.0
+lambda_kd_output: 1.0
+lambda_sd: 1.0
+learning_rate: 5.0e-05
+local_rank: -1
+logging_dir: logs
+lr_scheduler: constant
+lr_warmup_steps: 0
+max_grad_norm: 1.0
+max_train_samples: null
+max_train_steps: 0
+mixed_precision: fp16
+non_ema_revision: null
+num_train_epochs: 0
+num_valid_images: 2
+output_dir: ./results/toy_bk_small
+pretrained_model_name_or_path: CompVis/stable-diffusion-v1-4
+random_flip: true
+report_to: all
+resolution: 512
+resume_from_checkpoint: null
+revision: null
+scale_lr: false
+seed: 1234
+train_batch_size: 2
+train_data_dir: ./data/laion_aes/preprocessed_11k
+unet_config_name: bk_small
+unet_config_path: ./src/unet_config
+use_8bit_adam: false
+use_copy_weight_from_teacher: true
+use_ema: true
+valid_prompt: a golden vase with different flowers
+valid_steps: 5
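
The lambda_kd_feat/lambda_kd_output distillation weights, unet_config_name: bk_small, and the CompVis/stable-diffusion-v1-4 teacher suggest these hyperparameters were logged by a BK-SDM-style knowledge-distillation run. The file is plain YAML, so it is easy to reload for inspection or reproduction; a minimal sketch, assuming PyYAML is installed:

import yaml

with open("logs/text2image-fine-tune/1717821007.8419707/hparams.yml") as f:
    hparams = yaml.safe_load(f)

print(hparams["pretrained_model_name_or_path"])   # CompVis/stable-diffusion-v1-4
print(hparams["learning_rate"], hparams["train_batch_size"])  # 5e-05 2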
logs/text2image-fine-tune/events.out.tfevents.1717820999.0f914b078d44.4636.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f4bea260be0f9aa57f01594a4a8c829bd39279bba9dbf02c5c260cc5ea07935
+size 88
model_index.json CHANGED
@@ -1,7 +1,11 @@
 {
-  "_class_name": "StableDiffusionPipeline",
+  "_class_name": "StableDiffusionControlNetPipeline",
   "_diffusers_version": "0.28.2",
-  "_name_or_path": "kmpartner/bk-test",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "controlnet": [
+    "diffusers",
+    "ControlNetModel"
+  ],
   "feature_extractor": [
     "transformers",
     "CLIPImageProcessor"
@@ -33,6 +37,6 @@
   ],
   "vae": [
     "diffusers",
-    "AutoencoderTiny"
+    "AutoencoderKL"
   ]
 }
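
model_index.json is what diffusers uses to resolve the pipeline class, so after this change the repo loads as a ControlNet pipeline with the newly registered ControlNetModel component. A minimal loading sketch, where <repo_id> is a placeholder for this repository's Hub id:

import torch
from diffusers import DiffusionPipeline

# The concrete pipeline class is resolved from _class_name in model_index.json.
pipe = DiffusionPipeline.from_pretrained("<repo_id>", torch_dtype=torch.float16)
print(type(pipe).__name__)  # StableDiffusionControlNetPipeline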
safety_checker/config.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "_commit_hash": "858c800d0667613dc2f87f859e4843a95f8120cc",
-  "_name_or_path": "/root/.cache/huggingface/hub/models--kmpartner--bk-test/snapshots/858c800d0667613dc2f87f859e4843a95f8120cc/safety_checker",
+  "_commit_hash": "133a221b8aa7292a167afc5127cb63fb5005638b",
+  "_name_or_path": "/root/.cache/huggingface/hub/models--CompVis--stable-diffusion-v1-4/snapshots/133a221b8aa7292a167afc5127cb63fb5005638b/safety_checker",
   "architectures": [
     "StableDiffusionSafetyChecker"
   ],
@@ -86,7 +86,7 @@
     "vocab_size": 49408
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.41.2",
+  "transformers_version": null,
   "vision_config": {
     "_name_or_path": "",
     "add_cross_attention": false,
text_encoder/config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "kmpartner/bk-test",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
   "architectures": [
     "CLIPTextModel"
   ],
@@ -18,8 +18,8 @@
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "pad_token_id": 1,
-  "projection_dim": 768,
-  "torch_dtype": "float32",
+  "projection_dim": 512,
+  "torch_dtype": "float16",
   "transformers_version": "4.27.4",
   "vocab_size": 49408
 }
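
The new values match the config shipped with CompVis/stable-diffusion-v1-4; note that projection_dim is only consumed by CLIPTextModelWithProjection, so the 768 -> 512 change does not alter the CLIPTextModel weights, while the fp16 dtype matches the halved model.safetensors size below. A quick config check, with <repo_id> again a placeholder:

from transformers import CLIPTextConfig

cfg = CLIPTextConfig.from_pretrained("<repo_id>", subfolder="text_encoder")
print(cfg.projection_dim)  # 512
print(cfg.torch_dtype)     # float16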
text_encoder/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71c10601ece1342fede0300fb88db71c09ece8be491b71875d18b9799a5e6c15
-size 492265880
+oid sha256:77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e
+size 246144864
tokenizer/tokenizer_config.json CHANGED
@@ -1,23 +1,5 @@
 {
   "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "49406": {
-      "content": "<|startoftext|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "49407": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
   "bos_token": {
     "__type": "AddedToken",
     "content": "<|startoftext|>",
@@ -26,7 +8,6 @@
     "rstrip": false,
     "single_word": false
   },
-  "clean_up_tokenization_spaces": true,
   "do_lower_case": true,
   "eos_token": {
     "__type": "AddedToken",
@@ -39,7 +20,7 @@
   "errors": "replace",
   "model_max_length": 77,
   "pad_token": "<|endoftext|>",
-  "special_tokens_map_file": "/root/.cache/huggingface/hub/models--kmpartner--bk-test/snapshots/858c800d0667613dc2f87f859e4843a95f8120cc/tokenizer/special_tokens_map.json",
+  "special_tokens_map_file": "./special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
   "unk_token": {
     "__type": "AddedToken",
unet/config.json CHANGED
@@ -10,6 +10,7 @@
   "block_out_channels": [
     320,
     640,
+    1280,
     1280
   ],
   "center_input_sample": false,
@@ -22,7 +23,8 @@
   "down_block_types": [
     "CrossAttnDownBlock2D",
     "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D"
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
   ],
   "downsample_padding": 1,
   "dropout": 0.0,
@@ -55,6 +57,7 @@
   "timestep_post_act": null,
   "transformer_layers_per_block": 1,
   "up_block_types": [
+    "UpBlock2D",
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D",
     "CrossAttnUpBlock2D"
unet/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0e7506c2f819d897010902927b8f204679374d7610e0cb45cbc37489fb007ee9
-size 1293583616
+oid sha256:a9e9d51e8c2e414f5e4ef31164276798dcad2761559e290ed2c80d9900fe7c77
+size 1929435608
vae/config.json CHANGED
@@ -1,45 +1,34 @@
 {
-  "_class_name": "AutoencoderTiny",
+  "_class_name": "AutoencoderKL",
   "_diffusers_version": "0.28.2",
-  "_name_or_path": "madebyollin/taesd",
-  "act_fn": "relu",
+  "_name_or_path": "CompVis/stable-diffusion-v1-4",
+  "act_fn": "silu",
   "block_out_channels": [
-    64,
-    64,
-    64,
-    64
+    128,
+    256,
+    512,
+    512
   ],
-  "decoder_block_out_channels": [
-    64,
-    64,
-    64,
-    64
-  ],
-  "encoder_block_out_channels": [
-    64,
-    64,
-    64,
-    64
+  "down_block_types": [
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D",
+    "DownEncoderBlock2D"
   ],
-  "force_upcast": false,
+  "force_upcast": true,
   "in_channels": 3,
   "latent_channels": 4,
-  "latent_magnitude": 3,
-  "latent_shift": 0.5,
-  "num_decoder_blocks": [
-    3,
-    3,
-    3,
-    1
-  ],
-  "num_encoder_blocks": [
-    1,
-    3,
-    3,
-    3
-  ],
+  "latents_mean": null,
+  "latents_std": null,
+  "layers_per_block": 2,
+  "norm_num_groups": 32,
   "out_channels": 3,
-  "scaling_factor": 1.0,
-  "upsample_fn": "nearest",
-  "upsampling_scaling_factor": 2
+  "sample_size": 512,
+  "scaling_factor": 0.18215,
+  "up_block_types": [
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D",
+    "UpDecoderBlock2D"
+  ]
 }
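
The VAE swaps the tiny TAESD autoencoder (AutoencoderTiny, scaling_factor 1.0) back for SD-v1-4's full AutoencoderKL with the standard 0.18215 latent scaling. Pipelines pick the factor up from this config automatically, but manual encode/decode must apply it; a minimal sketch, assuming config-only instantiation and a stand-in image tensor in [-1, 1]:

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_config(AutoencoderKL.load_config("vae/config.json"))  # random weights

x = torch.randn(1, 3, 512, 512)  # stand-in image batch
with torch.no_grad():
    latents = vae.encode(x).latent_dist.sample() * vae.config.scaling_factor
    recon = vae.decode(latents / vae.config.scaling_factor).sample
print(latents.shape)  # torch.Size([1, 4, 64, 64])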
vae/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7956d561b1efbd861ad9b03fd8f01510f9e87eddc07bdfd20837009433f6ee5
-size 9793292
+oid sha256:4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c
+size 167335342