Upload app.py
app.py CHANGED
@@ -54,7 +54,8 @@ tokenizer = CLIPTokenizer.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE"
 text_encoder = CLIPTextModel.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE", subfolder="text_encoder").to(dtype=torch.float16, device=args.device)
 image_encoder = CLIPVisionModelWithProjection.from_pretrained("h94/IP-Adapter", subfolder="models/image_encoder").to(dtype=torch.float16, device=args.device)
 unet = UNet2DConditionModel.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE", subfolder="unet").to(dtype=torch.float16,device=args.device)
-
+image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch',
+                             model_revision='v1.0.3')
 #face_model
 app = FaceAnalysis(model_path="buffalo_l", providers=[('CUDAExecutionProvider', {"device_id": args.device})]) ## use GPU 0; the default buffalo_l model is sufficient
 app.prepare(ctx_id=0, det_size=(640, 640))
@@ -235,8 +236,7 @@ def dress_process(garm_img, face_img, pose_img, prompt, cloth_guidance_scale, ca
     ).images

     if if_post and if_ipa:
-        image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch',
-                                     model_revision='v1.0.3')
+
         output_array = np.array(output[0])

         bgr_array = cv2.cvtColor(output_array, cv2.COLOR_RGB2BGR)
@@ -256,6 +256,8 @@ face_list = os.listdir(os.path.join(example_path,"face"))
 face_list_path = [os.path.join(example_path,"face",face) for face in face_list]

 pose_list = os.listdir(os.path.join(example_path,"pose"))
+print(pose_list)
+print('====', len(pose_list))
 pose_list_path = [os.path.join(example_path,"pose",pose) for pose in pose_list]

 def process_image(image):
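For reference, the relocated image_face_fusion pipeline is what dress_process calls on the generated try-on output when if_post and if_ipa are enabled. Below is a minimal usage sketch, assuming the standard ModelScope convention of a dict(template=..., user=...) input and an OutputKeys.OUTPUT_IMG result; the task, model, and revision are taken from the diff above, and the file paths are placeholders.

from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys
import cv2

# Same constructor call as the module-level line added at 57-58 in this commit.
image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch',
                             model_revision='v1.0.3')

# 'template' is the image whose face region is replaced (e.g. the try-on output);
# 'user' supplies the identity face. Both paths here are placeholders.
result = image_face_fusion(dict(template='output.png', user='face.jpg'))
fused = result[OutputKeys.OUTPUT_IMG]  # ndarray in BGR order (ModelScope convention; assumption)
cv2.imwrite('fused.png', fused)

Building the pipeline once at module load, rather than on every call inside dress_process as before, avoids re-initializing the fusion model for each request.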