Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,8 +1,537 @@
-os.
-os.
-os.
import spaces
import sys
import os


# os.system(f"git clone https://github.com/Curt-Park/yolo-world-with-efficientvit-sam.git")
# cwd0 = os.getcwd()
# cwd1 = os.path.join(cwd0, "yolo-world-with-efficientvit-sam")
# os.chdir(cwd1)
# os.system("make setup")
# os.system(f"cd /home/user/app")

sys.path.append('./')
import gradio as gr
import random
import numpy as np
from gradio_demo.character_template import character_man, lorapath_man
from gradio_demo.character_template import character_woman, lorapath_woman
from gradio_demo.character_template import styles, lorapath_styles
import torch
import os
from typing import Tuple, List
import copy
import argparse
from diffusers.utils import load_image
import cv2
from PIL import Image, ImageOps
from transformers import DPTFeatureExtractor, DPTForDepthEstimation
# from controlnet_aux import OpenposeDetector
# from controlnet_aux.open_pose.body import Body

try:
    from inference.models import YOLOWorld
    from src.efficientvit.models.efficientvit.sam import EfficientViTSamPredictor
    from src.efficientvit.sam_model_zoo import create_sam_model
    import supervision as sv
except:
    print("YOLOWorld could not be loaded")

try:
    from groundingdino.models import build_model
    from groundingdino.util import box_ops
    from groundingdino.util.slconfig import SLConfig
    from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
    from groundingdino.util.inference import annotate, predict
    from segment_anything import build_sam, SamPredictor
    import groundingdino.datasets.transforms as T
except:
    print("groundingdino could not be loaded")

from src.pipelines.lora_pipeline import LoraMultiConceptPipeline
from src.prompt_attention.p2p_attention import AttentionReplace
from diffusers import ControlNetModel, StableDiffusionXLPipeline
from src.pipelines.lora_pipeline import revise_regionally_controlnet_forward

from download import OMG_download

CHARACTER_MAN_NAMES = list(character_man.keys())
CHARACTER_WOMAN_NAMES = list(character_woman.keys())
STYLE_NAMES = list(styles.keys())
MAX_SEED = np.iinfo(np.int32).max

### Description
title = r"""
<h1 align="center">OMG: Occlusion-friendly Personalized Multi-concept Generation In Diffusion Models</h1>
"""

description = r"""
<b>Official 🤗 Gradio demo</b> for <a href='https://github.com/' target='_blank'><b>OMG: Occlusion-friendly Personalized Multi-concept Generation In Diffusion Models</b></a>.<br>
How to use:<br>
1. Select two characters.
2. Enter a text prompt as you would for a normal text-to-image model.
3. Click the <b>Submit</b> button to start customizing.
4. Enjoy the generated image😊!
"""

article = r"""
---
📝 **Citation**
<br>
If our work is helpful for your research or applications, please cite us via:
```bibtex
@article{,
title={OMG: Occlusion-friendly Personalized Multi-concept Generation In Diffusion Models},
author={},
journal={},
year={}
}
```
"""

tips = r"""
### Usage tips of OMG
1. Input text prompts that describe a man and a woman.
"""

css = '''
.gradio-container {width: 85% !important}
'''

def sample_image(pipe,
                 input_prompt,
                 input_neg_prompt=None,
                 generator=None,
                 concept_models=None,
                 num_inference_steps=50,
                 guidance_scale=7.5,
                 controller=None,
                 stage=None,
                 region_masks=None,
                 lora_list=None,
                 styleL=None,
                 **extra_kargs
                 ):

    spatial_condition = extra_kargs.pop('spatial_condition')
    if spatial_condition is not None:
        spatial_condition_input = [spatial_condition] * len(input_prompt)
    else:
        spatial_condition_input = None

    images = pipe(
        prompt=input_prompt,
        concept_models=concept_models,
        negative_prompt=input_neg_prompt,
        generator=generator,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        cross_attention_kwargs={"scale": 0.8},
        controller=controller,
        stage=stage,
        region_masks=region_masks,
        lora_list=lora_list,
        styleL=styleL,
        image=spatial_condition_input,
        **extra_kargs).images

    return images

def load_image_yoloworld(image_source) -> np.ndarray:
    image = np.asarray(image_source)
    return image

def load_image_dino(image_source) -> Tuple[np.ndarray, torch.Tensor]:
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    image = np.asarray(image_source)
    image_transformed, _ = transform(image_source, None)
    return image, image_transformed

def predict_mask(segmentmodel, sam, image, TEXT_PROMPT, segmentType, confidence=0.2, threshold=0.5):
    if segmentType == 'GroundingDINO':
        image_source, image = load_image_dino(image)
        boxes, logits, phrases = predict(
            model=segmentmodel,
            image=image,
            caption=TEXT_PROMPT,
            box_threshold=0.3,
            text_threshold=0.25
        )
        sam.set_image(image_source)
        H, W, _ = image_source.shape
        boxes_xyxy = box_ops.box_cxcywh_to_xyxy(boxes) * torch.Tensor([W, H, W, H])

        transformed_boxes = sam.transform.apply_boxes_torch(boxes_xyxy, image_source.shape[:2]).cuda()
        masks, _, _ = sam.predict_torch(
            point_coords=None,
            point_labels=None,
            boxes=transformed_boxes,
            multimask_output=False,
        )
        masks = masks[0].squeeze(0)
    else:
        image_source = load_image_yoloworld(image)
        segmentmodel.set_classes([TEXT_PROMPT])
        results = segmentmodel.infer(image_source, confidence=confidence)
        detections = sv.Detections.from_inference(results).with_nms(
            class_agnostic=True, threshold=threshold
        )
        masks = None
        if len(detections) != 0:
            print(TEXT_PROMPT + " detected!")
            sam.set_image(image_source, image_format="RGB")
            masks, _, _ = sam.predict(box=detections.xyxy[0], multimask_output=False)
            masks = torch.from_numpy(masks.squeeze())

    return masks

def prepare_text(prompt, region_prompts):
    '''
    Args:
        region_prompts: [subject1]-*-[attribute1]|[subject2]-*-[attribute2]|...
    Returns:
        (prompt, region_collection): the prompt unchanged, plus a list of
        (subject, attribute) tuples with the square brackets stripped.
    '''
    region_collection = []

    regions = region_prompts.split('|')

    for region in regions:
        if region == '':
            break
        prompt_region, neg_prompt_region = region.split('-*-')
        prompt_region = prompt_region.replace('[', '').replace(']', '')
        neg_prompt_region = neg_prompt_region.replace('[', '').replace(']', '')

        region_collection.append((prompt_region, neg_prompt_region))
    return (prompt, region_collection)

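# For illustration only (hypothetical values): with
#   region_prompts = '[a man, red shirt]-*-[monochrome]|[a woman, blue dress]-*-[monochrome]'
# prepare_text(prompt, region_prompts) returns
#   (prompt, [('a man, red shirt', 'monochrome'), ('a woman, blue dress', 'monochrome')]).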

def build_model_sd(pretrained_model, controlnet_path, device, prompts):
    controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16).to(device)
    pipe = LoraMultiConceptPipeline.from_pretrained(
        pretrained_model, controlnet=controlnet, torch_dtype=torch.float16, variant="fp16").to(device)
    controller = AttentionReplace(prompts, 50, cross_replace_steps={"default_": 1.}, self_replace_steps=0.4,
                                  tokenizer=pipe.tokenizer, device=device, dtype=torch.float16,
                                  width=1024//32, height=1024//32)
    revise_regionally_controlnet_forward(pipe.unet, controller)
    pipe_concept = StableDiffusionXLPipeline.from_pretrained(pretrained_model, torch_dtype=torch.float16,
                                                             variant="fp16").to(device)
    return pipe, controller, pipe_concept

def build_model_lora(pipe_concept, lora_paths, style_path, condition, args, pipe):
    pipe_list = []
    # NOTE: `device` below refers to the module-level variable set under __main__.
    if condition == "Human pose":
        controlnet = ControlNetModel.from_pretrained(args.openpose_checkpoint, torch_dtype=torch.float16).to(device)
        pipe.controlnet = controlnet
    elif condition == "Canny Edge":
        controlnet = ControlNetModel.from_pretrained(args.canny_checkpoint, torch_dtype=torch.float16, variant="fp16").to(device)
        pipe.controlnet = controlnet
    elif condition == "Depth":
        controlnet = ControlNetModel.from_pretrained(args.depth_checkpoint, torch_dtype=torch.float16).to(device)
        pipe.controlnet = controlnet

    if style_path is not None and os.path.exists(style_path):
        pipe_concept.load_lora_weights(style_path, weight_name="pytorch_lora_weights.safetensors", adapter_name='style')
        pipe.load_lora_weights(style_path, weight_name="pytorch_lora_weights.safetensors", adapter_name='style')

    for lora_path in lora_paths.split('|'):
        adapter_name = lora_path.split('/')[-1].split('.')[0]
        pipe_concept.load_lora_weights(lora_path, weight_name="pytorch_lora_weights.safetensors", adapter_name=adapter_name)
        pipe_concept.enable_xformers_memory_efficient_attention()
        pipe_list.append(adapter_name)
    return pipe_list

def build_yolo_segment_model(sam_path, device):
    yolo_world = YOLOWorld(model_id="yolo_world/l")
    sam = EfficientViTSamPredictor(
        create_sam_model(name="xl1", weight_url=sam_path).to(device).eval()
    )
    return yolo_world, sam

def load_model_hf(repo_id, filename, ckpt_config_filename, device='cpu'):
    args = SLConfig.fromfile(ckpt_config_filename)
    model = build_model(args)
    args.device = device

    checkpoint = torch.load(os.path.join(repo_id, filename), map_location='cpu')
    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
    print("Model loaded from {} \n => {}".format(filename, log))
    _ = model.eval()
    return model

def build_dino_segment_model(ckpt_repo_id, sam_checkpoint):
    ckpt_filename = "groundingdino_swinb_cogcoor.pth"
    ckpt_config_filename = os.path.join(ckpt_repo_id, "GroundingDINO_SwinB.cfg.py")
    groundingdino_model = load_model_hf(ckpt_repo_id, ckpt_filename, ckpt_config_filename)
    sam = build_sam(checkpoint=sam_checkpoint)
    sam.cuda()
    sam_predictor = SamPredictor(sam)
    return groundingdino_model, sam_predictor

def resize_and_center_crop(image, output_size=(1024, 576)):
    width, height = image.size
    aspect_ratio = width / height
    new_height = output_size[1]
    new_width = int(aspect_ratio * new_height)

    resized_image = image.resize((new_width, new_height), Image.LANCZOS)

    if new_width < output_size[0] or new_height < output_size[1]:
        padding_color = "gray"
        resized_image = ImageOps.expand(resized_image,
                                        ((output_size[0] - new_width) // 2,
                                         (output_size[1] - new_height) // 2,
                                         (output_size[0] - new_width + 1) // 2,
                                         (output_size[1] - new_height + 1) // 2),
                                        fill=padding_color)

    left = (resized_image.width - output_size[0]) / 2
    top = (resized_image.height - output_size[1]) / 2
    right = (resized_image.width + output_size[0]) / 2
    bottom = (resized_image.height + output_size[1]) / 2

    cropped_image = resized_image.crop((left, top, right, bottom))

    return cropped_image

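# Behaviour sketch with illustrative sizes: a 2000x1000 input is resized to 1152x576 and
# center-cropped to 1024x576; a 500x1000 input is resized to 288x576, padded with gray to
# 1024x576, and then cropped to the same target size.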
def main(device, segment_type):
    pipe, controller, pipe_concept = build_model_sd(args.pretrained_sdxl_model, args.openpose_checkpoint, device, prompts_tmp)

    # if segment_type == 'GroundingDINO':
    #     detect_model, sam = build_dino_segment_model(args.dino_checkpoint, args.sam_checkpoint)
    # else:
    #     detect_model, sam = build_yolo_segment_model(args.efficientViT_checkpoint, device)
    # NOTE: with the builders above commented out, `detect_model` and `sam` are never defined,
    # so the mask-prediction calls in generate_image below raise and are swallowed by its
    # exception handler.

    resolution_list = ["1440*728",
                       "1344*768",
                       "1216*832",
                       "1152*896",
                       "1024*1024",
                       "896*1152",
                       "832*1216",
                       "768*1344",
                       "728*1440"]
    ratio_list = [1440 / 728, 1344 / 768, 1216 / 832, 1152 / 896, 1024 / 1024, 896 / 1152, 832 / 1216, 768 / 1344,
                  728 / 1440]
    condition_list = ["None",
                      "Human pose",
                      "Canny Edge",
                      "Depth"]

    depth_estimator = DPTForDepthEstimation.from_pretrained(args.dpt_checkpoint).to("cuda")
    feature_extractor = DPTFeatureExtractor.from_pretrained(args.dpt_checkpoint)
    # body_model = Body(args.pose_detector_checkpoint)
    # openpose = OpenposeDetector(body_model)

    def remove_tips():
        return gr.update(visible=False)

    def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
        if randomize_seed:
            seed = random.randint(0, MAX_SEED)
        return seed

    def get_humanpose(img):
        openpose_image = openpose(img)
        return openpose_image

    def get_cannyedge(image):
        image = np.array(image)
        image = cv2.Canny(image, 100, 200)
        image = image[:, :, None]
        image = np.concatenate([image, image, image], axis=2)
        canny_image = Image.fromarray(image)
        return canny_image

    def get_depth(image):
        image = feature_extractor(images=image, return_tensors="pt").pixel_values.to("cuda")
        with torch.no_grad(), torch.autocast("cuda"):
            depth_map = depth_estimator(image).predicted_depth

        depth_map = torch.nn.functional.interpolate(
            depth_map.unsqueeze(1),
            size=(1024, 1024),
            mode="bicubic",
            align_corners=False,
        )
        depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
        depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
        depth_map = (depth_map - depth_min) / (depth_max - depth_min)
        image = torch.cat([depth_map] * 3, dim=1)
        image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
        image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
        return image

    @spaces.GPU
    def generate_image(prompt1, negative_prompt, man, woman, resolution, local_prompt1, local_prompt2, seed, condition, condition_img1, style):
        try:
            path1 = lorapath_man[man]
            path2 = lorapath_woman[woman]
            pipe_concept.unload_lora_weights()
            pipe.unload_lora_weights()
            pipe_list = build_model_lora(pipe_concept, path1 + "|" + path2, lorapath_styles[style], condition, args, pipe)

            if lorapath_styles[style] is not None and os.path.exists(lorapath_styles[style]):
                styleL = True
            else:
                styleL = False

            input_list = [prompt1]
            condition_list = [condition_img1]
            output_list = []

            width, height = int(resolution.split("*")[0]), int(resolution.split("*")[1])

            kwargs = {
                'height': height,
                'width': width,
            }

            for prompt, condition_img in zip(input_list, condition_list):
                if prompt != '':
                    input_prompt = []
                    p = '{prompt}, 35mm photograph, film, professional, 4k, highly detailed.'
                    if styleL:
                        p = styles[style] + p
                    input_prompt.append([p.replace("{prompt}", prompt), p.replace("{prompt}", prompt)])
                    if styleL:
                        input_prompt.append([(styles[style] + local_prompt1, character_man.get(man)[1]),
                                             (styles[style] + local_prompt2, character_woman.get(woman)[1])])
                    else:
                        input_prompt.append([(local_prompt1, character_man.get(man)[1]),
                                             (local_prompt2, character_woman.get(woman)[1])])

                    if condition == 'Human pose' and condition_img is not None:
                        index = ratio_list.index(
                            min(ratio_list, key=lambda x: abs(x - condition_img.shape[1] / condition_img.shape[0])))
                        resolution = resolution_list[index]
                        width, height = int(resolution.split("*")[0]), int(resolution.split("*")[1])
                        kwargs['height'] = height
                        kwargs['width'] = width
                        condition_img = resize_and_center_crop(Image.fromarray(condition_img), (width, height))
                        spatial_condition = get_humanpose(condition_img)
                    elif condition == 'Canny Edge' and condition_img is not None:
                        index = ratio_list.index(
                            min(ratio_list, key=lambda x: abs(x - condition_img.shape[1] / condition_img.shape[0])))
                        resolution = resolution_list[index]
                        width, height = int(resolution.split("*")[0]), int(resolution.split("*")[1])
                        kwargs['height'] = height
                        kwargs['width'] = width
                        condition_img = resize_and_center_crop(Image.fromarray(condition_img), (width, height))
                        spatial_condition = get_cannyedge(condition_img)
                    elif condition == 'Depth' and condition_img is not None:
                        index = ratio_list.index(
                            min(ratio_list, key=lambda x: abs(x - condition_img.shape[1] / condition_img.shape[0])))
                        resolution = resolution_list[index]
                        width, height = int(resolution.split("*")[0]), int(resolution.split("*")[1])
                        kwargs['height'] = height
                        kwargs['width'] = width
                        condition_img = resize_and_center_crop(Image.fromarray(condition_img), (width, height))
                        spatial_condition = get_depth(condition_img)
                    else:
                        spatial_condition = None

                    kwargs['spatial_condition'] = spatial_condition
                    controller.reset()
                    image = sample_image(
                        pipe,
                        input_prompt=input_prompt,
                        concept_models=pipe_concept,
                        input_neg_prompt=[negative_prompt] * len(input_prompt),
                        generator=torch.Generator(device).manual_seed(seed),
                        controller=controller,
                        stage=1,
                        lora_list=pipe_list,
                        styleL=styleL,
                        **kwargs)

                    controller.reset()
                    if pipe.tokenizer("man")["input_ids"][1] in pipe.tokenizer(args.prompt)["input_ids"][1:-1]:
                        mask1 = predict_mask(detect_model, sam, image[0], 'man', args.segment_type, confidence=0.15,
                                             threshold=0.5)
                    else:
                        mask1 = None

                    if pipe.tokenizer("woman")["input_ids"][1] in pipe.tokenizer(args.prompt)["input_ids"][1:-1]:
                        mask2 = predict_mask(detect_model, sam, image[0], 'woman', args.segment_type, confidence=0.15,
                                             threshold=0.5)
                    else:
                        mask2 = None

                    if mask1 is None and mask2 is None:
                        output_list.append(image[1])
                    else:
                        image = sample_image(
                            pipe,
                            input_prompt=input_prompt,
                            concept_models=pipe_concept,
                            input_neg_prompt=[negative_prompt] * len(input_prompt),
                            generator=torch.Generator(device).manual_seed(seed),
                            controller=controller,
                            stage=2,
                            region_masks=[mask1, mask2],
                            lora_list=pipe_list,
                            styleL=styleL,
                            **kwargs)
                        output_list.append(image[1])
                else:
                    output_list.append(None)
            output_list.append(spatial_condition)
            return output_list
        except Exception as e:
            print(f"error: {e}")
            return

    def get_local_value_man(input):
        return character_man[input][0]

    def get_local_value_woman(input):
        return character_woman[input][0]

    @spaces.GPU
    def generate(prompt):
        print(os.system(prompt))
        return prompt

    gr.Interface(
        fn=generate,
        inputs=gr.Text(),
        outputs=gr.Gallery(),
    ).launch()



def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--pretrained_sdxl_model', default='Fucius/stable-diffusion-xl-base-1.0', type=str)
    parser.add_argument('--openpose_checkpoint', default='thibaud/controlnet-openpose-sdxl-1.0', type=str)
    parser.add_argument('--canny_checkpoint', default='diffusers/controlnet-canny-sdxl-1.0', type=str)
    parser.add_argument('--depth_checkpoint', default='diffusers/controlnet-depth-sdxl-1.0', type=str)
    parser.add_argument('--efficientViT_checkpoint', default='../checkpoint/sam/xl1.pt', type=str)
    parser.add_argument('--dino_checkpoint', default='./checkpoint/GroundingDINO', type=str)
    parser.add_argument('--sam_checkpoint', default='./checkpoint/sam/sam_vit_h_4b8939.pth', type=str)
    parser.add_argument('--dpt_checkpoint', default='Intel/dpt-hybrid-midas', type=str)
    parser.add_argument('--pose_detector_checkpoint', default='../checkpoint/ControlNet/annotator/ckpts/body_pose_model.pth', type=str)
    parser.add_argument('--prompt', default='Close-up photo of the cool man and beautiful woman in surprised expressions as they accidentally discover a mysterious island while on vacation by the sea, 35mm photograph, film, professional, 4k, highly detailed.', type=str)
    parser.add_argument('--negative_prompt', default='noisy, blurry, soft, deformed, ugly', type=str)
    parser.add_argument('--seed', default=22, type=int)
    parser.add_argument('--suffix', default='', type=str)
    parser.add_argument('--segment_type', default='yoloworld', help='GroundingDINO or yoloworld', type=str)
    return parser.parse_args()

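# Example invocation (assuming the GroundingDINO and SAM checkpoints referenced above are
# actually present at those paths):
#   python app.py --segment_type GroundingDINO --seed 42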

if __name__ == '__main__':
    args = parse_args()

    prompts = [args.prompt] * 2
    prompts_tmp = copy.deepcopy(prompts)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    download = OMG_download()
    main(device, args.segment_type)