add diffusers weights

#1
by YiYiXu - opened
Files changed (3)
  1. README.md +39 -0
  2. config.json +21 -0
  3. diffusion_pytorch_model.safetensors +3 -0
README.md CHANGED
@@ -91,6 +91,45 @@ Which should give you an image like below:
 
  ![A girl sitting in a cafe](sample_result.png)
 
+ ### Using ControlNets in Diffusers
+
+ Make sure you upgrade to the latest diffusers version: `pip install -U diffusers`. Then you can run:
+
+ ```python
+ import torch
+ from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel
+ from diffusers.utils import load_image
+
+ # Load the depth ControlNet and plug it into the SD3.5 Large pipeline.
+ controlnet = SD3ControlNetModel.from_pretrained("stabilityai/stable-diffusion-3.5-large-controlnet-depth", torch_dtype=torch.float16)
+ pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-3.5-large",
+     controlnet=controlnet,
+     torch_dtype=torch.float16,
+ ).to("cuda")
+
+ # The conditioning image is a precomputed depth map.
+ control_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/marigold/marigold_einstein_lcm_depth.png")
+ generator = torch.Generator(device="cpu").manual_seed(0)
+ image = pipe(
+     prompt="a photo of a man",
+     control_image=control_image,
+     guidance_scale=4.5,
+     num_inference_steps=40,
+     generator=generator,
+     max_sequence_length=77,
+ ).images[0]
+ image.save("depth-8b.jpg")
+ ```
+
+ You can use `image_gen_aux` to extract the `depth_image`; the library contains all the preprocessors you need for use with diffusers pipelines.
+
+ ```python
+ # install image_gen_aux with: pip install git+https://github.com/huggingface/image_gen_aux.git
+ from diffusers.utils import load_image
+ from image_gen_aux import DepthPreprocessor
+
+ image = load_image("path to image")
+
+ # Estimate a depth map to use as the control image.
+ depth_preprocessor = DepthPreprocessor.from_pretrained("depth-anything/Depth-Anything-V2-Large-hf").to("cuda")
+ depth_image = depth_preprocessor(image, invert=True)[0].convert("RGB")
+ ```
 
  ### Preprocessing
 
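Taken together, the two snippets in this diff compose end to end: the `depth_image` returned by `DepthPreprocessor` is what the pipeline expects as `control_image`. A minimal sketch combining them (the input image URL and output filename are placeholder assumptions):

```python
import torch
from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel
from diffusers.utils import load_image
from image_gen_aux import DepthPreprocessor

# Extract a depth map from your own photo (placeholder URL).
image = load_image("https://example.com/photo.jpg")
depth_preprocessor = DepthPreprocessor.from_pretrained("depth-anything/Depth-Anything-V2-Large-hf").to("cuda")
control_image = depth_preprocessor(image, invert=True)[0].convert("RGB")

# Same pipeline setup as in the README snippet above.
controlnet = SD3ControlNetModel.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large-controlnet-depth", torch_dtype=torch.float16
)
pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

image = pipe(
    prompt="a photo of a man",
    control_image=control_image,
    guidance_scale=4.5,
    num_inference_steps=40,
    generator=torch.Generator(device="cpu").manual_seed(0),
    max_sequence_length=77,
).images[0]
image.save("depth-from-photo.jpg")  # placeholder filename
```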
config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "_class_name": "SD3ControlNetModel",
+   "_diffusers_version": "0.32.0.dev0",
+   "attention_head_dim": 64,
+   "caption_projection_dim": 2048,
+   "dual_attention_layers": [],
+   "extra_conditioning_channels": 0,
+   "force_zeros_for_pooled_projection": false,
+   "in_channels": 16,
+   "joint_attention_dim": null,
+   "num_attention_heads": 38,
+   "num_layers": 19,
+   "out_channels": 16,
+   "patch_size": 2,
+   "pooled_projection_dim": 2048,
+   "pos_embed_max_size": null,
+   "pos_embed_type": null,
+   "qk_norm": null,
+   "sample_size": 128,
+   "use_pos_embed": false
+ }
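For reference, this config is what `SD3ControlNetModel.from_pretrained` parses when loading the repo; the values are exposed on the loaded model's `config` attribute. A quick sketch to sanity-check a download:

```python
import torch
from diffusers import SD3ControlNetModel

controlnet = SD3ControlNetModel.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large-controlnet-depth", torch_dtype=torch.float16
)
# These mirror the JSON above.
print(controlnet.config.num_layers)          # 19
print(controlnet.config.attention_head_dim)  # 64
print(controlnet.config.sample_size)         # 128
```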
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cd54d6c670ebc864b459f251573bc0f9cb60f2adf644dbd4df4ffab6879b1a1
+ size 8614110992
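The weights file itself lives in Git LFS; the three lines above are the pointer, recording the blob's sha256 and its size in bytes (about 8.6 GB). A small sketch for checking a downloaded copy against that pointer (the local path is an assumption):

```python
import hashlib

# Assumed local path to the downloaded weights file.
path = "diffusion_pytorch_model.safetensors"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

# Should match the oid recorded in the LFS pointer above.
assert h.hexdigest() == "5cd54d6c670ebc864b459f251573bc0f9cb60f2adf644dbd4df4ffab6879b1a1"
```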