takahirox committed on
Commit
0c32ef6
0 Parent(s):

initial commit

Browse files
Files changed (4) hide show
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +127 -0
  4. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: First Demo
3
+ emoji: 🌖
4
+ colorFrom: green
5
+ colorTo: yellow
6
+ sdk: gradio
7
+ sdk_version: 4.7.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio demo: LCM Dreamshaper img2img, optionally guided by a canny ControlNet."""

from diffusers import (
    ControlNetModel,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionControlNetImg2ImgPipeline,
)
from compel import Compel
from PIL import Image
import cv2
import gc
import gradio
import numpy
import torch

# Model / runtime configuration.
base_model = "SimianLuo/LCM_Dreamshaper_v7"
controlnet_model = "lllyasviel/control_v11p_sd15_canny"
device = "cuda"
dtype = torch.float16
# All inputs are resized to this resolution before inference.
width = 512
height = 512

# FIX: the original passed the misspelled kwarg ``tourch_dtype``, which
# ``from_pretrained`` silently ignored, so the ControlNet weights loaded in
# float32 instead of the intended half precision.
controlnet = ControlNetModel.from_pretrained(
    controlnet_model, torch_dtype=dtype
)

# Pipeline used when ControlNet guidance is requested.
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    base_model, controlnet=controlnet, safety_checker=None
).to(dtype=dtype)
pipe.enable_model_cpu_offload(device=device)
# channels_last tends to speed up UNet convolutions on CUDA.
pipe.unet.to(memory_format=torch.channels_last)

# Prompt-weighting / long-prompt processor bound to the ControlNet pipeline.
compel_proc = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    truncate_long_prompts=False,
)

# Fallback pipeline used when ControlNet guidance is disabled.
pipe_no_controlnet = StableDiffusionImg2ImgPipeline.from_pretrained(
    base_model, safety_checker=None
).to(dtype=dtype)
# FIX: the original repeated ``pipe.enable_model_cpu_offload(...)`` here
# (copy/paste slip) and enabled offload on the fallback pipeline without a
# device; this paragraph configures ``pipe_no_controlnet``.
pipe_no_controlnet.enable_model_cpu_offload(device=device)

# Separate compel processor bound to the fallback pipeline's encoders.
compel_proc_no_controlnet = Compel(
    tokenizer=pipe_no_controlnet.tokenizer,
    text_encoder=pipe_no_controlnet.text_encoder,
    truncate_long_prompts=False,
)
49
def predict(
    prompt: str,
    image: Image.Image,
    use_controlnet: bool,
    generator: int,
    num_inference_steps: int,
    strength: float,
    guidance_scale: float,
    controlnet_conditioning_scale: float,
    canny_lower_threshold: int,
    canny_higher_threshold: int,
):
    """Run img2img generation, optionally conditioned on a canny edge map.

    Parameters
    ----------
    prompt:
        Text prompt; run through compel for weighting / long-prompt support.
    image:
        Source PIL image. NOTE: the original annotation was ``Image`` (the
        PIL *module*); fixed to the ``Image.Image`` class.
    use_controlnet:
        When True, a canny edge map of ``image`` drives the ControlNet
        pipeline; otherwise the plain img2img pipeline is used.
    generator:
        Integer seed; converted into a torch generator for reproducibility.
    num_inference_steps, strength, guidance_scale:
        Standard diffusers img2img knobs, forwarded unchanged.
    controlnet_conditioning_scale:
        Strength of the ControlNet conditioning (ControlNet path only).
    canny_lower_threshold, canny_higher_threshold:
        Hysteresis thresholds for ``cv2.Canny`` (ControlNet path only).

    Returns
    -------
    The first generated PIL image, or ``None`` when no input image was
    supplied or the pipeline produced no images.
    """
    if image is None:
        return None

    # ``generator`` arrives as an int seed; the pipelines expect a
    # torch.Generator, which manual_seed returns.
    generator = torch.manual_seed(generator)
    # TODO: Keep the original ratio?
    image = image.resize((width, height))

    if use_controlnet:
        prompt_embeds = compel_proc(prompt)
        # Build a 3-channel canny edge image as the ControlNet condition:
        # Canny yields a single-channel map, which is stacked to RGB.
        image_array = numpy.array(image)
        image_array = cv2.Canny(
            image_array,
            canny_lower_threshold,
            canny_higher_threshold
        )
        image_array = image_array[:, :, None]
        image_array = numpy.concatenate([image_array, image_array, image_array], axis=2)
        control_image = Image.fromarray(image_array)
        results = pipe(
            control_image=control_image,
            control_guidance_end=1.0,
            control_guidance_start=0.0,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            generator=generator,
            guidance_scale=guidance_scale,
            image=image,
            num_inference_steps=num_inference_steps,
            output_type="pil",
            prompt_embeds=prompt_embeds,
            strength=strength,
        )
        control_image.close()
    else:
        prompt_embeds = compel_proc_no_controlnet(prompt)
        results = pipe_no_controlnet(
            generator=generator,
            guidance_scale=guidance_scale,
            image=image,
            num_inference_steps=num_inference_steps,
            output_type="pil",
            prompt_embeds=prompt_embeds,
            strength=strength,
        )

    # Free intermediate tensors between requests to keep memory bounded.
    gc.collect()

    if len(results.images) > 0:
        return results.images[0]
    return None
110
+
111
# UI widgets, in the exact positional order that ``predict`` expects.
input_widgets = [
    gradio.Textbox("Kirisame Marisa, Cute, Smiling, High quality, Realistic"),  # prompt
    gradio.Image(type="pil"),                                                   # image
    gradio.Checkbox(True),                                                      # use_controlnet
    gradio.Slider(0, 2147483647, 2159232, step=1),                              # generator (seed)
    gradio.Slider(2, 15, 4, step=1),                                            # num_inference_steps
    gradio.Slider(0.0, 1.0, 0.5, step=0.01),                                    # strength
    gradio.Slider(0.0, 5.0, 0.2, step=0.01),                                    # guidance_scale
    gradio.Slider(0.0, 1.0, 0.8, step=0.01),                                    # controlnet_conditioning_scale
    gradio.Slider(0, 255, 100, step=1),                                         # canny_lower_threshold
    gradio.Slider(0, 255, 200, step=1),                                         # canny_higher_threshold
]

# Wire the widgets to ``predict`` and start serving the demo.
app = gradio.Interface(
    fn=predict,
    inputs=input_widgets,
    outputs=gradio.Image(type="pil")
)
app.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ diffusers
2
+ accelerate
3
+ compel
4
+ gradio
5
+ numpy
6
+ opencv-python
7
+ transformers