# This program auto-crops the face in a given image.
# The image also needs to be converted into a grayscale format to satisfy the pre-trained model's input requirement.

import os
from pathlib import Path

import cv2
import numpy as np

import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

# Auto-crop every image in the given directory.
# Set up the MediaPipe face detector used to locate the face before cropping;
# the model file 'blaze_face_short_range.tflite' must be present at the given path.

base_options = python.BaseOptions(model_asset_path='blaze_face_short_range.tflite')
options = vision.FaceDetectorOptions(base_options=base_options)
detector = vision.FaceDetector.create_from_options(options)

def crop(image, detection_result):
  """Crop the first detected face out of `image` (a NumPy array).

  Returns the cropped face, or None if no face was detected.
  """
  # Assume only one face matters here: return the crop for the first detection.
  for detection in detection_result.detections:
    bbox = detection.bounding_box
    # Clamp the bounding box to the image so the slice cannot go out of bounds.
    y0 = max(bbox.origin_y, 0)
    x0 = max(bbox.origin_x, 0)
    y1 = min(bbox.origin_y + bbox.height, image.shape[0])
    x1 = min(bbox.origin_x + bbox.width, image.shape[1])
    return image[y0:y1, x0:x1]
  return None
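
# The original crop also carried a commented-out padded variant
# (bbox.origin_y - 90 .. + 30, bbox.origin_x - 80 .. + 35), i.e. extra context
# around the face. A minimal sketch of that idea with a single margin parameter
# follows; `crop_with_margin` and its default margin are assumptions, not part
# of the original script.

def crop_with_margin(image, detection_result, margin=30):
  """Sketch: crop the first detected face with `margin` extra pixels per side,
  clamped to the image borders so the slice stays in bounds."""
  for detection in detection_result.detections:
    bbox = detection.bounding_box
    y0 = max(bbox.origin_y - margin, 0)
    x0 = max(bbox.origin_x - margin, 0)
    y1 = min(bbox.origin_y + bbox.height + margin, image.shape[0])
    x1 = min(bbox.origin_x + bbox.width + margin, image.shape[1])
    return image[y0:y1, x0:x1]
  return None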

def auto_cropping(img_dir):
  """Auto-crop the face in every image inside `img_dir`.

  Returns the colour-converted crop of the last image that contained a face,
  or None if no face was found in any image.
  """
  files = os.listdir(img_dir)  # list of files in the directory
  print(files)

  rgb_annotated_image = None
  for file in files:
      if file == "DONT_DELETE.txt":
        continue
      abs_path = (Path(img_dir) / file).resolve()

      # Load the image with MediaPipe and run the face detector on it.
      img = mp.Image.create_from_file(str(abs_path))
      detection_result = detector.detect(img)

      image_copy = np.copy(img.numpy_view())
      cropped = crop(image_copy, detection_result)
      if cropped is None:
          print(f'No face detected in {file}, skipping')
          continue

      # Convert the colour channel order of the crop before returning it.
      rgb_annotated_image = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)

  return rgb_annotated_image

# auto_cropping("image") # <----------- !!!!change address here!!!! ------------------> #

# The current problem (6/2/2023) is that the model may recognize some cartoon faces as human faces;
# my idea is to use another model to classify whether the cropped image is a real human face.
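
# A sketch of that real-vs-cartoon idea: run the cropped face through a second,
# hypothetical binary classifier. Everything here is an assumption, not part of
# the original script: the model file 'real_face_classifier.tflite', its 224x224
# input size, and its single sigmoid output. TensorFlow Lite's Interpreter is
# used only as one possible way to run such a model.

def is_real_face(cropped_face, model_path='real_face_classifier.tflite'):
  """Sketch: return True if the (hypothetical) classifier scores the crop as a real face."""
  import tensorflow as tf  # imported lazily so the rest of the script works without TF

  interpreter = tf.lite.Interpreter(model_path=model_path)
  interpreter.allocate_tensors()
  input_details = interpreter.get_input_details()[0]
  output_details = interpreter.get_output_details()[0]

  # Resize and normalise the crop to the assumed model input (224x224, [0, 1]).
  resized = cv2.resize(cropped_face, (224, 224)).astype(np.float32) / 255.0
  interpreter.set_tensor(input_details['index'], resized[np.newaxis, ...])
  interpreter.invoke()
  score = interpreter.get_tensor(output_details['index'])[0][0]
  return score > 0.5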

# print(auto_cropping("image"))