Upload folder using huggingface_hub
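For reference, a commit like this is typically produced with `upload_folder` from the `huggingface_hub` library. A minimal sketch; the repo id and local path below are placeholders, not taken from this commit:

```python
# A minimal sketch of pushing a local folder as a single commit.
from huggingface_hub import upload_folder

upload_folder(
    repo_id="your-username/flux-nf4",  # placeholder repo id (assumption)
    folder_path="./flux-nf4",          # placeholder local path (assumption)
    commit_message="Upload folder using huggingface_hub",
)
```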
transformer/config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "_class_name": "FluxTransformer2DModel",
+  "_diffusers_version": "0.31.0.dev0",
+  "_name_or_path": "black-forest-labs/FLUX.1-dev",
+  "attention_head_dim": 128,
+  "axes_dims_rope": [
+    16,
+    56,
+    56
+  ],
+  "guidance_embeds": true,
+  "in_channels": 64,
+  "joint_attention_dim": 4096,
+  "num_attention_heads": 24,
+  "num_layers": 19,
+  "num_single_layers": 38,
+  "patch_size": 1,
+  "pooled_projection_dim": 768,
+  "quantization_config": {
+    "_load_in_4bit": true,
+    "_load_in_8bit": false,
+    "bnb_4bit_compute_dtype": "float16",
+    "bnb_4bit_quant_storage": "uint8",
+    "bnb_4bit_quant_type": "nf4",
+    "bnb_4bit_use_double_quant": false,
+    "llm_int8_enable_fp32_cpu_offload": false,
+    "llm_int8_has_fp16_weight": false,
+    "llm_int8_skip_modules": null,
+    "llm_int8_threshold": 6.0,
+    "load_in_4bit": true,
+    "load_in_8bit": false,
+    "quant_method": "bitsandbytes"
+  }
+}
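This config describes the FLUX.1-dev transformer quantized to 4-bit NF4 via bitsandbytes. A minimal sketch of loading it with diffusers (>= 0.31, with bitsandbytes installed); the repo id "your-username/flux-nf4" is a placeholder for wherever this folder lives:

```python
import torch
from diffusers import FluxPipeline, FluxTransformer2DModel

# The quantization_config serialized in config.json above is picked up
# automatically, so the weights load directly as 4-bit NF4.
transformer = FluxTransformer2DModel.from_pretrained(
    "your-username/flux-nf4",   # placeholder repo id (assumption)
    subfolder="transformer",
    torch_dtype=torch.float16,  # matches bnb_4bit_compute_dtype above
)

# Reuse the rest of the base pipeline named by _name_or_path in the config.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=transformer,
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()
```

Passing `torch.float16` keeps activations in the same dtype the NF4 weights are dequantized to (`bnb_4bit_compute_dtype` above), which avoids needless casts at inference time.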
transformer/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e33fab96afc896107fe1c18bdb7f45f5906c9c4d0a621ed47ca0111295b522c1
+size 6699380895
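These three lines are a Git LFS pointer, not the weights themselves; clients resolve it to the actual ~6.7 GB file on download. A minimal sketch for checking a downloaded copy against the pointer's oid and size, again with a placeholder repo id:

```python
import hashlib
import os
from huggingface_hub import hf_hub_download

# Fetch (or reuse a cached copy of) the actual weights file.
path = hf_hub_download(
    repo_id="your-username/flux-nf4",  # placeholder repo id (assumption)
    filename="transformer/diffusion_pytorch_model.safetensors",
)

# Stream the file through sha256 so ~6.7 GB never sits in memory at once.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

# Both values should match the oid and size recorded in the pointer above.
print(sha.hexdigest())
print(os.path.getsize(path))
```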