ZiqianLiu committed
Commit b0e4e5e
1 Parent(s): 4b1db35

Upload 2 files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
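Note: the added rule routes the dlib landmark model through Git LFS like the other large-file patterns above. As a quick sanity check (a minimal sketch, assuming git and git-lfs are installed and the script runs from the repository root), git check-attr reports which filter the rule assigns to the file:

import subprocess

# Ask git which clean/smudge filter applies to the model file under the new .gitattributes rule
result = subprocess.run(
    ['git', 'check-attr', 'filter', '--', 'shape_predictor_68_face_landmarks.dat'],
    capture_output=True, text=True, check=True,
)
print(result.stdout.strip())  # expected output ends with "filter: lfs"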
nodes.py CHANGED
@@ -363,10 +363,8 @@ class RestoreFace:
 import numpy as np
 from ultralytics import YOLO
 from PIL import Image
-
-# Load a pretrained YOLOv8n model
-current_directory = os.getcwd()
-model = YOLO(task='detect', model=current_directory + '/custom_nodes/yolov8_face/yolov8m_200e.pt')
+import dlib
+import cv2
 
 class Mynode_2:
     @classmethod
@@ -377,7 +375,7 @@ class Mynode_2:
                 "source_image": ("IMAGE",),
             },
             "optional": {
-
+                "detection_type": (["Face", "Eyes"], {"default": "Face"},),
             }
         }
 
@@ -385,45 +383,93 @@ class Mynode_2:
     RETURN_TYPES = ("IMAGE",)
     FUNCTION = "method"
 
-    def method(self, input_image, source_image):
+    def method(self, input_image, source_image, detection_type):
         input_image_tmp = input_image.squeeze()
         # Convert the PyTorch tensor to a PIL image
         input_image_pil = Image.fromarray(
             np.clip(255. * input_image_tmp.cpu().numpy(), 0, 255).astype(np.uint8)).convert('RGBA')
 
-        # Run inference on an image
-        results = model.predict(source=input_image_pil, conf=0.5)
-        # View results
-        tmp = results[0].boxes.shape
-        judge_face = tmp[0]
-        print(judge_face)
-
-        if judge_face == 0:  # 0 means no face was detected
-            return (input_image,)
-
-        else:
-            enabled = True
-
-            swap_model = "inswapper_128.onnx"
-            facedetection = "retinaface_resnet50"
-            face_restore_model = "GFPGANv1.4.pth"
-            face_restore_visibility = 1
-            codeformer_weight = 0.5
-            detect_gender_input = "no"
-            detect_gender_source = "no"
-            input_faces_index = "0"
-            source_faces_index = "0"
-            console_log_level = 1
-
-            class_reactor = reactor()
-            change_face_img, face_model = class_reactor.execute(enabled, input_image, swap_model, detect_gender_source, detect_gender_input,
-                                                                source_faces_index, input_faces_index, console_log_level, face_restore_model,
-                                                                face_restore_visibility, codeformer_weight, facedetection, source_image=source_image,
-                                                                face_model=None)
-
-            return (change_face_img,)
+        img = input_image_pil
+        if img.mode != 'RGB':
+            img = img.convert('RGB')
+        image_rgb_numpy = np.array(img)
+        # dlib's detector runs on grayscale images, so convert to grayscale to reduce computation
+        gray = cv2.cvtColor(image_rgb_numpy, cv2.COLOR_RGB2GRAY)
+
+        # Initialize the ReActor parameters
+        enabled = True
+        swap_model = "inswapper_128.onnx"
+        facedetection = "retinaface_resnet50"
+        face_restore_model = "GFPGANv1.4.pth"
+        face_restore_visibility = 1
+        codeformer_weight = 0.5
+        detect_gender_input = "no"
+        detect_gender_source = "no"
+        input_faces_index = "0"
+        source_faces_index = "0"
+        console_log_level = 1
+
+        current_directory = os.getcwd()
+        # Load dlib's frontal face detector
+        detector = dlib.get_frontal_face_detector()
+        # Load the official 68-point model for locating facial landmarks
+        predictor = dlib.shape_predictor(current_directory + '/custom_nodes/yolov8_face/shape_predictor_68_face_landmarks.dat')
+
+        if detection_type == "Face":
+            # model = YOLO(task='detect', model=current_directory + '/custom_nodes/yolov8_face/yolov8m_200e.pt')
+            # # Run inference on an image
+            # results = model.predict(source=input_image_pil, conf=0.5)
+            # # View results
+            # tmp = results[0].boxes.shape
+            # judge_face = tmp[0]
+
+            # Detect faces in the grayscale image; the second argument is the upsampling factor
+            faces = detector(gray, 1)
+            if len(faces) == 0:
+                print("No face detected, returning the original image")
+                return (input_image,)
+
+            print("Face detected, starting the face swap")
+            class_reactor = reactor()
+            change_face_img, face_model = class_reactor.execute(enabled, input_image, swap_model, detect_gender_source, detect_gender_input,
+                                                                source_faces_index, input_faces_index, console_log_level, face_restore_model,
+                                                                face_restore_visibility, codeformer_weight, facedetection, source_image=source_image,
+                                                                face_model=None)
+            return (change_face_img,)
+
+        if detection_type == "Eyes":
+            faces = detector(gray)
+            left_eye = []
+            right_eye = []
+            for face in faces:
+                landmarks = predictor(gray, face)
+                # Collect the left-eye landmarks (points 37-42, indices 36-41)
+                for n in range(36, 42):
+                    x = landmarks.part(n).x
+                    y = landmarks.part(n).y
+                    left_eye.append((x, y))
+
+                # Collect the right-eye landmarks (points 43-48, indices 42-47)
+                for n in range(42, 48):
+                    x = landmarks.part(n).x
+                    y = landmarks.part(n).y
+                    right_eye.append((x, y))
+
+            # If at least one eye was found, treat the image as containing eyes
+            if len(left_eye) > 0 or len(right_eye) > 0:
+                print("Eyes detected, starting the face swap")
+                class_reactor = reactor()
+                change_face_img, face_model = class_reactor.execute(enabled, input_image, swap_model, detect_gender_source, detect_gender_input,
+                                                                    source_faces_index, input_faces_index, console_log_level, face_restore_model,
+                                                                    face_restore_visibility, codeformer_weight, facedetection, source_image=source_image,
+                                                                    face_model=None)
+                return (change_face_img,)
+
+            print("No eyes detected, returning the original image")
+            return (input_image,)
 
 NODE_CLASS_MAPPINGS = {
     "ReActorFaceSwap": reactor,
     "ReActorLoadFaceModel": LoadFaceModel,
shape_predictor_68_face_landmarks.dat ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
+size 99693937
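Note: because the landmark model is stored via Git LFS, a clone without `git lfs pull` leaves only the small text pointer above in place of the ~99.7 MB model, and dlib.shape_predictor will then fail to deserialize the file. A minimal pre-flight check (a sketch; the path mirrors the one hard-coded in method()):

import os

MODEL_PATH = os.path.join(os.getcwd(), 'custom_nodes', 'yolov8_face',
                          'shape_predictor_68_face_landmarks.dat')
EXPECTED_SIZE = 99693937  # the size recorded in the LFS pointer above

def looks_like_lfs_pointer(path):
    # Real LFS pointers are tiny text files that start with the spec line above
    if os.path.getsize(path) > 1024:
        return False
    with open(path, 'rb') as f:
        return f.read(7) == b'version'

if not os.path.isfile(MODEL_PATH):
    raise FileNotFoundError(MODEL_PATH)
if looks_like_lfs_pointer(MODEL_PATH):
    raise RuntimeError('Git LFS pointer found instead of the model; run `git lfs pull` first.')
if os.path.getsize(MODEL_PATH) != EXPECTED_SIZE:
    print('Warning: model size differs from the size recorded in the commit.')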