Doron Adler committed
Commit d6c8575 • 1 Parent(s): 212a881

* Return a sharpened version of the image, using an unsharp mask

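The change is a classic unsharp mask: blur the image with a Gaussian kernel, then push each pixel away from its blurred value by a factor `amount`, i.e. sharpened = (amount + 1) * image - amount * blurred. The full implementation lands in app.py below; as a minimal standalone sketch of just the formula (the 5x5 kernel, sigma=1.0 and amount=2.0 match the defaults in that diff, while the toy array here is made up for illustration):

import numpy as np
import cv2 as cv

# Toy grayscale "image" with a soft vertical edge, values in [0, 255].
image = np.array([[64, 64, 128, 192, 192]] * 5, dtype=np.float64)

amount = 2.0
blurred = cv.GaussianBlur(image, (5, 5), 1.0)
# sharpened = (amount + 1) * image - amount * blurred, clipped back to [0, 255]
sharpened = np.clip((amount + 1) * image - amount * blurred, 0, 255).round().astype(np.uint8)

print(sharpened[2])  # values dip below 64 and rise above 192 around the edge: higher local contrast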
.gitattributes CHANGED
@@ -25,5 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-u2net_bce_itr_18000_train_3.891670_tar_0.553700_512x_460x.jit.pt filter=lfs diff=lfs merge=lfs -text
+u2net_bce_itr_25000_train_3.856416_tar_0.547567-400x_360x.jit.pt filter=lfs diff=lfs merge=lfs -text
 shape_predictor_5_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
Sample00001.jpg CHANGED
Sample00002.jpg CHANGED
Sample00003.jpg CHANGED
Sample00004.jpg CHANGED
Sample00005.jpg CHANGED
Sample00006.jpg CHANGED
app.py CHANGED
@@ -6,12 +6,27 @@ import face_detection
 import PIL
 from PIL import Image, ImageOps, ImageFile
 import numpy as np
-
+import cv2 as cv
 import torch
+
 torch.set_grad_enabled(False)
-model = torch.jit.load('u2net_bce_itr_18000_train_3.891670_tar_0.553700_512x_460x.jit.pt')
+model = torch.jit.load('u2net_bce_itr_25000_train_3.856416_tar_0.547567-400x_360x.jit.pt')
 model.eval()
 
+# https://en.wikipedia.org/wiki/Unsharp_masking
+# https://stackoverflow.com/a/55590133/1495606
+def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=2.0, threshold=0):
+    """Return a sharpened version of the image, using an unsharp mask."""
+    blurred = cv.GaussianBlur(image, kernel_size, sigma)
+    sharpened = float(amount + 1) * image - float(amount) * blurred
+    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
+    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
+    sharpened = sharpened.round().astype(np.uint8)
+    if threshold > 0:
+        low_contrast_mask = np.absolute(image - blurred) < threshold
+        np.copyto(sharpened, image, where=low_contrast_mask)
+    return sharpened
+
 def normPRED(d):
     ma = np.max(d)
     mi = np.min(d)
@@ -20,6 +35,12 @@ def normPRED(d):
 
     return dn
 
+def array_to_np(array_in):
+    array_in = normPRED(array_in)
+    array_in = np.squeeze(255.0*(array_in))
+    array_in = np.transpose(array_in, (1, 2, 0))
+    return array_in
+
 def array_to_image(array_in):
     array_in = normPRED(array_in)
     array_in = np.squeeze(255.0*(array_in))
@@ -45,11 +66,11 @@ def image_as_array(image_in):
     image_out = np.expand_dims(tmpImg, 0)
     return image_out
 
-def find_aligned_face(image_in, size=512):
+def find_aligned_face(image_in, size=400):
     aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
     return aligned_image, n_faces, quad
 
-def align_first_face(image_in, size=512):
+def align_first_face(image_in, size=400):
     aligned_image, n_faces, quad = find_aligned_face(image_in,size=size)
     if n_faces == 0:
         try:
@@ -82,14 +103,17 @@ def face2doll(
     else:
         input = torch.Tensor(aligned_img)
         results = model(input)
-        d2 = array_to_image(results[1].detach().numpy())
-        output = img_concat_h(array_to_image(aligned_img), d2)
+        doll_np_image = array_to_np(results[1].detach().numpy())
+        doll_image = unsharp_mask(doll_np_image)
+        doll_image = Image.fromarray(doll_image)
+
+        output = img_concat_h(array_to_image(aligned_img), doll_image)
         del results
 
     return output
 
 def inference(img):
-    out = face2doll(img, 512)
+    out = face2doll(img, 400)
     return out
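Putting the new pieces together, the output path in face2doll is now: raw model output, then array_to_np (min-max normalize, scale to 0-255, CHW to HWC), then unsharp_mask, then a PIL image that gets concatenated next to the aligned input. A rough standalone sketch of that post-processing step, assuming results[1] is a (1, 3, H, W) torch tensor as the transpose in array_to_np implies (the helper name postprocess is mine, not part of the app):

import numpy as np
import cv2 as cv
from PIL import Image

def postprocess(result_tensor):
    # Equivalent of normPRED + array_to_np: normalize, scale to 0-255, CHW -> HWC
    arr = result_tensor.detach().numpy()
    arr = (arr - arr.min()) / (arr.max() - arr.min())
    arr = np.transpose(np.squeeze(255.0 * arr), (1, 2, 0))
    # Equivalent of unsharp_mask with kernel_size=(5, 5), sigma=1.0, amount=2.0
    blurred = cv.GaussianBlur(arr, (5, 5), 1.0)
    sharpened = np.clip(3.0 * arr - 2.0 * blurred, 0, 255)
    return Image.fromarray(sharpened.round().astype(np.uint8))

# e.g. doll_image = postprocess(model(input)[1]), mirroring the face2doll change above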
u2net_bce_itr_18000_train_3.891670_tar_0.553700_512x_460x.jit.pt → u2net_bce_itr_25000_train_3.856416_tar_0.547567-400x_360x.jit.pt RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4759db0de410078514769bdae185cf80b095e8a5b49aef31987ad5fa6e66a30
-size 177290264
+oid sha256:9b8d7427e71fd7cb303ffffaef7a42c7c7f95aa77f846fa4cccb4130fcfbbf74
+size 177193974
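The renamed file above is only a Git LFS pointer; the actual ~177 MB TorchScript checkpoint lives in LFS storage and is addressed by the sha256 oid. A small sketch for checking that a locally downloaded copy matches this pointer (the path assumes you run it from the repo root):

import hashlib
from pathlib import Path

path = Path("u2net_bce_itr_25000_train_3.856416_tar_0.547567-400x_360x.jit.pt")
digest = hashlib.sha256(path.read_bytes()).hexdigest()

print(path.stat().st_size == 177193974)  # the pointer's "size" line
print(digest == "9b8d7427e71fd7cb303ffffaef7a42c7c7f95aa77f846fa4cccb4130fcfbbf74")  # the pointer's "oid" line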