justin2341 committed on
Commit 36f0d8c
1 Parent(s): 40217d7

Upload 12 files

Files changed (13)
  1. .gitattributes +1 -0
  2. Dockerfile +21 -8
  3. app.py +335 -0
  4. demo.py +153 -0
  5. facebox.py +11 -0
  6. facesdk.py +25 -0
  7. libfacesdk1.so +3 -0
  8. libimutils.so +0 -0
  9. libimutils.so_for_ubuntu22 +0 -0
  10. license.txt +5 -0
  11. readme.txt +9 -0
  12. requirements.txt +6 -0
  13. run.sh +5 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ libfacesdk1.so filter=lfs diff=lfs merge=lfs -text
Dockerfile CHANGED
@@ -1,8 +1,21 @@
- FROM openvino/ubuntu20_runtime:2022.3.0
- USER root
-
- RUN apt-get update -y
- RUN apt-get install -y libcurl4-openssl-dev libssl-dev libgomp1 libpugixml-dev
-
- RUN mkdir -p /root/kby-ai-live
- WORKDIR /root/kby-ai-live
+ FROM openvino/ubuntu20_runtime:2022.3.0
+ USER root
+
+ RUN apt-get update -y
+ RUN apt-get install -y libcurl4-openssl-dev libssl-dev libgomp1 libpugixml-dev
+
+ RUN mkdir -p /root/kby-ai-live
+ WORKDIR /root/kby-ai-live
+ COPY ./libfacesdk1.so .
+ COPY ./libimutils.so /usr/lib/libimutils.so
+ COPY ./facesdk.py .
+ COPY ./facebox.py .
+ COPY ./app.py .
+ COPY ./demo.py .
+ COPY ./run.sh .
+ COPY ./live_examples ./live_examples
+ COPY ./requirements.txt .
+ COPY ./data ./data
+ RUN pip3 install -r requirements.txt
+ CMD ["./run.sh"]
+ EXPOSE 8088 9000
app.py ADDED
@@ -0,0 +1,335 @@
+ import sys
+ sys.path.append('.')
+
+ import os
+ import numpy as np
+ import base64
+ import io
+
+ from PIL import Image, ExifTags
+ from flask import Flask, request, jsonify
+ from flask_cors import CORS
+ from facesdk import getMachineCode
+ from facesdk import setActivation
+ from facesdk import faceDetection
+ from facesdk import initSDK
+ from facebox import FaceBox
+
+ licensePath = "license.txt"
+ license = ""
+
+ # Get a specific environment variable by name
+ license = os.environ.get("LICENSE")
+
+ # Check if the variable exists
+ if license is not None:
+     print("Value of LICENSE:", license)
+ else:
+     license = ""
+     try:
+         with open(licensePath, 'r') as file:
+             license = file.read().strip()
+     except IOError as exc:
+         print("failed to open license.txt: ", exc.errno)
+     print("license: ", license)
+
+ livenessThreshold = 0.7
+ yawThreshold = 10
+ pitchThreshold = 10
+ rollThreshold = 10
+ occlusionThreshold = 0.9
+ eyeClosureThreshold = 0.8
+ mouthOpeningThreshold = 0.5
+ borderRate = 0.05
+ smallFaceThreshold = 100
+ lowQualityThreshold = 0.3
+ highQualityThreshold = 0.7
+ luminanceDarkThreshold = 50
+ luminanceLightThreshold = 200
+
+ maxFaceCount = 10
+
+ machineCode = getMachineCode()
+ print("machineCode: ", machineCode.decode('utf-8'))
+
+ ret = setActivation(license.encode('utf-8'))
+ print("activation: ", ret)
+
+ ret = initSDK("data".encode('utf-8'))
+ print("init: ", ret)
+
+ app = Flask(__name__)
+ CORS(app)
+
+ def apply_exif_rotation(image):
+     # Get the EXIF data
+     try:
+         exif = image._getexif()
+         if exif is not None:
+             for orientation in ExifTags.TAGS.keys():
+                 if ExifTags.TAGS[orientation] == 'Orientation':
+                     break
+
+             # Get the orientation value
+             orientation = exif.get(orientation, None)
+
+             # Apply the appropriate rotation based on the orientation
+             if orientation == 3:
+                 image = image.rotate(180, expand=True)
+             elif orientation == 6:
+                 image = image.rotate(270, expand=True)
+             elif orientation == 8:
+                 image = image.rotate(90, expand=True)
+
+     except AttributeError:
+         print("No EXIF data found")
+
+     return image
+
+ @app.route('/check_liveness', methods=['POST'])
+ def check_liveness():
+     faces = []
+     isNotFront = None
+     isOcclusion = None
+     isEyeClosure = None
+     isMouthOpening = None
+     isBoundary = None
+     isSmall = None
+     quality = None
+     luminance = None
+     livenessScore = None
+
+     file = request.files['file']
+
+     try:
+         #image = apply_exif_rotation(Image.open(file)).convert('RGB')
+         image = Image.open(file).convert('RGB')
+     except:
+         result = "Failed to open file"
+         faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
+                      "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
+         response = jsonify({"face_state": faceState, "faces": faces})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+
+     image_np = np.asarray(image)
+
+     faceBoxes = (FaceBox * maxFaceCount)()
+     faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+     if faceCount == 0:
+         # retry detection on the rotated image
+         image = image.rotate(90, expand=True)
+         image_np = np.asarray(image)
+         faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+         if faceCount == 0:
+             image = image.rotate(90, expand=True)
+             image_np = np.asarray(image)
+             faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+             if faceCount == 0:
+                 image = image.rotate(90, expand=True)
+                 image_np = np.asarray(image)
+                 faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+
+
+     for i in range(faceCount):
+         landmark_68 = []
+         for j in range(68):
+             landmark_68.append({"x": faceBoxes[i].landmark_68[j * 2], "y": faceBoxes[i].landmark_68[j * 2 + 1]})
+         faces.append({"x1": faceBoxes[i].x1, "y1": faceBoxes[i].y1, "x2": faceBoxes[i].x2, "y2": faceBoxes[i].y2,
+                       "liveness": faceBoxes[i].liveness,
+                       "yaw": faceBoxes[i].yaw, "roll": faceBoxes[i].roll, "pitch": faceBoxes[i].pitch,
+                       "face_quality": faceBoxes[i].face_quality, "face_luminance": faceBoxes[i].face_luminance, "eye_dist": faceBoxes[i].eye_dist,
+                       "left_eye_closed": faceBoxes[i].left_eye_closed, "right_eye_closed": faceBoxes[i].right_eye_closed,
+                       "face_occlusion": faceBoxes[i].face_occlusion, "mouth_opened": faceBoxes[i].mouth_opened,
+                       "landmark_68": landmark_68})
+
+     result = ""
+     if faceCount == 0:
+         result = "No face"
+     # elif faceCount > 1:
+     #     result = "Multiple face"
+     elif faceCount < 0:
+         result = "License error!"
+     else:
+         livenessScore = faceBoxes[0].liveness
+         if livenessScore > livenessThreshold:
+             result = "Real"
+         else:
+             result = "Spoof"
+
+         isNotFront = True
+         isOcclusion = False
+         isEyeClosure = False
+         isMouthOpening = False
+         isBoundary = False
+         isSmall = False
+         quality = "Low"
+         luminance = "Dark"
+         if abs(faceBoxes[0].yaw) < yawThreshold and abs(faceBoxes[0].roll) < rollThreshold and abs(faceBoxes[0].pitch) < pitchThreshold:
+             isNotFront = False
+
+         if faceBoxes[0].face_occlusion > occlusionThreshold:
+             isOcclusion = True
+
+         if faceBoxes[0].left_eye_closed > eyeClosureThreshold or faceBoxes[0].right_eye_closed > eyeClosureThreshold:
+             isEyeClosure = True
+
+         if faceBoxes[0].mouth_opened > mouthOpeningThreshold:
+             isMouthOpening = True
+
+         # flag faces whose box touches the image border
+         if (faceBoxes[0].x1 < image_np.shape[1] * borderRate or
+             faceBoxes[0].y1 < image_np.shape[0] * borderRate or
+             faceBoxes[0].x2 > image_np.shape[1] - image_np.shape[1] * borderRate or
+             faceBoxes[0].y2 > image_np.shape[0] - image_np.shape[0] * borderRate):
+             isBoundary = True
+
+         if faceBoxes[0].eye_dist < smallFaceThreshold:
+             isSmall = True
+
+         if faceBoxes[0].face_quality < lowQualityThreshold:
+             quality = "Low"
+         elif faceBoxes[0].face_quality < highQualityThreshold:
+             quality = "Medium"
+         else:
+             quality = "High"
+
+         if faceBoxes[0].face_luminance < luminanceDarkThreshold:
+             luminance = "Dark"
+         elif faceBoxes[0].face_luminance < luminanceLightThreshold:
+             luminance = "Normal"
+         else:
+             luminance = "Light"
+
+     faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
+                  "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
+     response = jsonify({"face_state": faceState, "faces": faces})
+
+     response.status_code = 200
+     response.headers["Content-Type"] = "application/json; charset=utf-8"
+     return response
+
+ @app.route('/check_liveness_base64', methods=['POST'])
+ def check_liveness_base64():
+     faces = []
+     isNotFront = None
+     isOcclusion = None
+     isEyeClosure = None
+     isMouthOpening = None
+     isBoundary = None
+     isSmall = None
+     quality = None
+     luminance = None
+     livenessScore = None
+
+     content = request.get_json()
+
+     try:
+         imageBase64 = content['base64']
+         image_data = base64.b64decode(imageBase64)
+         #image = apply_exif_rotation(Image.open(io.BytesIO(image_data))).convert("RGB")
+         image = Image.open(io.BytesIO(image_data)).convert("RGB")
+     except:
+         result = "Failed to open file"
+         faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
+                      "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
+         response = jsonify({"face_state": faceState, "faces": faces})
+
+         response.status_code = 200
+         response.headers["Content-Type"] = "application/json; charset=utf-8"
+         return response
+
+
+     image_np = np.asarray(image)
+
+     faceBoxes = (FaceBox * maxFaceCount)()
+     faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+     if faceCount == 0:
+         # retry detection on the rotated image
+         image = image.rotate(90, expand=True)
+         image_np = np.asarray(image)
+         faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+         if faceCount == 0:
+             image = image.rotate(90, expand=True)
+             image_np = np.asarray(image)
+             faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+             if faceCount == 0:
+                 image = image.rotate(90, expand=True)
+                 image_np = np.asarray(image)
+                 faceCount = faceDetection(image_np, image_np.shape[1], image_np.shape[0], faceBoxes, maxFaceCount)
+
+     for i in range(faceCount):
+         landmark_68 = []
+         for j in range(68):
+             landmark_68.append({"x": faceBoxes[i].landmark_68[j * 2], "y": faceBoxes[i].landmark_68[j * 2 + 1]})
+         faces.append({"x1": faceBoxes[i].x1, "y1": faceBoxes[i].y1, "x2": faceBoxes[i].x2, "y2": faceBoxes[i].y2,
+                       "liveness": faceBoxes[i].liveness,
+                       "yaw": faceBoxes[i].yaw, "roll": faceBoxes[i].roll, "pitch": faceBoxes[i].pitch,
+                       "face_quality": faceBoxes[i].face_quality, "face_luminance": faceBoxes[i].face_luminance, "eye_dist": faceBoxes[i].eye_dist,
+                       "left_eye_closed": faceBoxes[i].left_eye_closed, "right_eye_closed": faceBoxes[i].right_eye_closed,
+                       "face_occlusion": faceBoxes[i].face_occlusion, "mouth_opened": faceBoxes[i].mouth_opened,
+                       "landmark_68": landmark_68})
+
+     result = ""
+     if faceCount == 0:
+         result = "No face"
+     # elif faceCount > 1:
+     #     result = "Multiple face"
+     elif faceCount < 0:
+         result = "License error!"
+     else:
+         livenessScore = faceBoxes[0].liveness
+         if livenessScore > livenessThreshold:
+             result = "Real"
+         else:
+             result = "Spoof"
+
+         isNotFront = True
+         isOcclusion = False
+         isEyeClosure = False
+         isMouthOpening = False
+         isBoundary = False
+         isSmall = False
+         quality = "Low"
+         luminance = "Dark"
+         if abs(faceBoxes[0].yaw) < yawThreshold and abs(faceBoxes[0].roll) < rollThreshold and abs(faceBoxes[0].pitch) < pitchThreshold:
+             isNotFront = False
+
+         if faceBoxes[0].face_occlusion > occlusionThreshold:
+             isOcclusion = True
+
+         if faceBoxes[0].left_eye_closed > eyeClosureThreshold or faceBoxes[0].right_eye_closed > eyeClosureThreshold:
+             isEyeClosure = True
+
+         if faceBoxes[0].mouth_opened > mouthOpeningThreshold:
+             isMouthOpening = True
+
+         # flag faces whose box touches the image border
+         if (faceBoxes[0].x1 < image_np.shape[1] * borderRate or
+             faceBoxes[0].y1 < image_np.shape[0] * borderRate or
+             faceBoxes[0].x2 > image_np.shape[1] - image_np.shape[1] * borderRate or
+             faceBoxes[0].y2 > image_np.shape[0] - image_np.shape[0] * borderRate):
+             isBoundary = True
+
+         if faceBoxes[0].eye_dist < smallFaceThreshold:
+             isSmall = True
+
+         if faceBoxes[0].face_quality < lowQualityThreshold:
+             quality = "Low"
+         elif faceBoxes[0].face_quality < highQualityThreshold:
+             quality = "Medium"
+         else:
+             quality = "High"
+
+         if faceBoxes[0].face_luminance < luminanceDarkThreshold:
+             luminance = "Dark"
+         elif faceBoxes[0].face_luminance < luminanceLightThreshold:
+             luminance = "Normal"
+         else:
+             luminance = "Light"
+
+     faceState = {"is_not_front": isNotFront, "is_occluded": isOcclusion, "eye_closed": isEyeClosure, "mouth_opened": isMouthOpening,
+                  "is_boundary_face": isBoundary, "is_small": isSmall, "quality": quality, "luminance": luminance, "result": result, "liveness_score": livenessScore}
+     response = jsonify({"face_state": faceState, "faces": faces})
+
+     response.status_code = 200
+     response.headers["Content-Type"] = "application/json; charset=utf-8"
+     return response
+
+
+ if __name__ == '__main__':
+     port = int(os.environ.get("PORT", 8080))
+     app.run(host='0.0.0.0', port=port)
demo.py ADDED
@@ -0,0 +1,153 @@
+ import gradio as gr
+ import requests
+ import datadog_api_client
+ from PIL import Image
+
+ def check_liveness(frame):
+     url = "http://127.0.0.1:8080/check_liveness"
+     file = {'file': open(frame, 'rb')}
+
+     r = requests.post(url=url, files=file)
+     result = r.json().get('face_state').get('result')
+
+     html = None
+     faces = None
+     if r.json().get('face_state').get('is_not_front') is not None:
+         liveness_score = r.json().get('face_state').get('liveness_score')
+         eye_closed = r.json().get('face_state').get('eye_closed')
+         is_boundary_face = r.json().get('face_state').get('is_boundary_face')
+         is_not_front = r.json().get('face_state').get('is_not_front')
+         is_occluded = r.json().get('face_state').get('is_occluded')
+         is_small = r.json().get('face_state').get('is_small')
+         luminance = r.json().get('face_state').get('luminance')
+         mouth_opened = r.json().get('face_state').get('mouth_opened')
+         quality = r.json().get('face_state').get('quality')
+
+         html = ("<table>"
+                 "<tr>"
+                 "<th>Face State</th>"
+                 "<th>Value</th>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Result</td>"
+                 "<td>{result}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Liveness Score</td>"
+                 "<td>{liveness_score}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Quality</td>"
+                 "<td>{quality}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Luminance</td>"
+                 "<td>{luminance}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Is Small</td>"
+                 "<td>{is_small}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Is Boundary</td>"
+                 "<td>{is_boundary_face}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Is Not Front</td>"
+                 "<td>{is_not_front}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Face Occluded</td>"
+                 "<td>{is_occluded}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Eye Closed</td>"
+                 "<td>{eye_closed}</td>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Mouth Opened</td>"
+                 "<td>{mouth_opened}</td>"
+                 "</tr>"
+                 "</table>".format(liveness_score=liveness_score, quality=quality, luminance=luminance, is_small=is_small, is_boundary_face=is_boundary_face,
+                                   is_not_front=is_not_front, is_occluded=is_occluded, eye_closed=eye_closed, mouth_opened=mouth_opened, result=result))
+
+     else:
+         html = ("<table>"
+                 "<tr>"
+                 "<th>Face State</th>"
+                 "<th>Value</th>"
+                 "</tr>"
+                 "<tr>"
+                 "<td>Result</td>"
+                 "<td>{result}</td>"
+                 "</tr>"
+                 "</table>".format(result=result))
+
+     try:
+         image = Image.open(frame)
+
+         for face in r.json().get('faces'):
+             x1 = face.get('x1')
+             y1 = face.get('y1')
+             x2 = face.get('x2')
+             y2 = face.get('y2')
+
+             if x1 < 0:
+                 x1 = 0
+             if y1 < 0:
+                 y1 = 0
+             if x2 >= image.width:
+                 x2 = image.width - 1
+             if y2 >= image.height:
+                 y2 = image.height - 1
+
+             face_image = image.crop((x1, y1, x2, y2))
+             face_image_ratio = face_image.width / float(face_image.height)
+             resized_w = int(face_image_ratio * 150)
+             resized_h = 150
+
+             face_image = face_image.resize((int(resized_w), int(resized_h)))
+
+             if faces is None:
+                 faces = face_image
+             else:
+                 new_image = Image.new('RGB', (faces.width + face_image.width + 10, 150), (80, 80, 80))
+
+                 new_image.paste(faces, (0, 0))
+                 new_image.paste(face_image, (faces.width + 10, 0))
+                 faces = new_image.copy()
+     except:
+         pass
+
+     return [faces, html]
+
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """
+         # KBY-AI
+         We offer SDKs for Face Recognition, Face Liveness Detection(Face Anti-Spoofing), and ID Card Recognition.<br/>
+         Besides that, we can provide several AI models and development services in machine learning.
+
+         ## Simple Installation & Simple API
+         ```
+         sudo docker pull kbyai/face-liveness-detection:latest
+         sudo docker run -e LICENSE="xxxxx" -p 8080:8080 -p 9000:9000 kbyai/face-liveness-detection:latest
+         ```
+         ## KYC Verification Demo
+         https://github.com/kby-ai/KYC-Verification
+         """
+     )
+     with gr.TabItem("Face Liveness Detection"):
+         with gr.Row():
+             with gr.Column():
+                 live_image_input = gr.Image(type='filepath')
+                 gr.Examples(['live_examples/1.jpg', 'live_examples/2.jpg', 'live_examples/3.jpg', 'live_examples/4.jpg'],
+                             inputs=live_image_input)
+                 check_liveness_button = gr.Button("Check Liveness")
+             with gr.Column():
+                 liveness_face_output = gr.Image(type="pil").style(height=150)
+                 livness_result_output = gr.HTML()
+
+     check_liveness_button.click(check_liveness, inputs=live_image_input, outputs=[liveness_face_output, livness_result_output])
+
+ demo.launch(server_name="0.0.0.0", server_port=9000)
facebox.py ADDED
@@ -0,0 +1,11 @@
+ from ctypes import *
+
+ class FaceBox(Structure):
+     _fields_ = [("x1", c_int32), ("y1", c_int32), ("x2", c_int32), ("y2", c_int32),
+                 ("liveness", c_float),
+                 ("yaw", c_float), ("roll", c_float), ("pitch", c_float),
+                 ("face_quality", c_float), ("face_luminance", c_float), ("eye_dist", c_float),
+                 ("left_eye_closed", c_float), ("right_eye_closed", c_float),
+                 ("face_occlusion", c_float), ("mouth_opened", c_float),
+                 ("landmark_68", c_float * 136)
+                 ]
facesdk.py ADDED
@@ -0,0 +1,25 @@
+ import os
+
+ from ctypes import *
+ from numpy.ctypeslib import ndpointer
+ from facebox import FaceBox
+
+ libPath = os.path.abspath(os.path.dirname(__file__)) + '/libfacesdk1.so'
+ facesdk = cdll.LoadLibrary(libPath)
+
+ getMachineCode = facesdk.getMachineCode
+ getMachineCode.argtypes = []
+ getMachineCode.restype = c_char_p
+
+ setActivation = facesdk.setActivation
+ setActivation.argtypes = [c_char_p]
+ setActivation.restype = c_int32
+
+ initSDK = facesdk.initSDK
+ initSDK.argtypes = [c_char_p]
+ initSDK.restype = c_int32
+
+ faceDetection = facesdk.faceDetection
+ faceDetection.argtypes = [ndpointer(c_ubyte, flags='C_CONTIGUOUS'), c_int32, c_int32, POINTER(FaceBox), c_int32]
+ faceDetection.restype = c_int32
+
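For reference, a minimal sketch of how these ctypes bindings are typically driven, mirroring the call sequence in app.py above: read the machine code, activate with a license string, initialize the SDK with the model directory, then pass a contiguous RGB buffer and a pre-allocated FaceBox array to faceDetection. The license value is a placeholder; the sample image path is one of the bundled live_examples.

```python
import numpy as np
from PIL import Image

from facesdk import getMachineCode, setActivation, initSDK, faceDetection
from facebox import FaceBox

# The machine code identifies the host; the license must match it.
print("machine code:", getMachineCode().decode('utf-8'))
print("activation:", setActivation("YOUR_LICENSE_KEY".encode('utf-8')))  # placeholder license
print("init:", initSDK("data".encode('utf-8')))                          # model directory, as in app.py

# faceDetection expects a C-contiguous uint8 image buffer, its width and height,
# a pre-allocated FaceBox array, and that array's capacity.
image = np.asarray(Image.open("live_examples/1.jpg").convert("RGB"))
boxes = (FaceBox * 10)()
count = faceDetection(image, image.shape[1], image.shape[0], boxes, 10)
for box in boxes[:count]:
    print(box.x1, box.y1, box.x2, box.y2, box.liveness)
```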
libfacesdk1.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08a30277cfea156f31dfb5d9ac589d5db8362e5b63f32fa88e2045ffde4ef658
+ size 5111592
libimutils.so ADDED
Binary file (412 kB).
 
libimutils.so_for_ubuntu22 ADDED
Binary file (412 kB).
 
license.txt ADDED
@@ -0,0 +1,5 @@
+ DFS2z+xIsvbAV0ZdS37aH68Sba8sPqW1QJc4RdxYg7aTrFgxZBGSMnwD0Hr6KC01EAhV0yM14UxQ
+ NJT4a2ygemfrLW9zp1jOqjD+eWJNVZvUQHffnsCVJSKzvoo/Qu3BkYh2I8yJyAbW/QhdhN4Bt9yy
+ EYzNRNW3iY/FM5CDLDeblSTx+iX5egoYLn1icsMyp6gbanY5kO5G7rclgGUaK5f0Pzjukwm1eGpe
+ q9EkVd0iZkQaXAwEc+HCYRthS6GlTOl2qJIYyTPSEAX0QGF9ajVgWi4CUw6cdMfeFsGJbh3QFA0w
+ gyO9k0z/MGsLrEmwSg4TkZl4UP9HCSxswok4AQ==
readme.txt ADDED
@@ -0,0 +1,9 @@
+ #### docker build
+ sudo docker build --pull --rm -f Dockerfile -t kby-ai-live:latest .
+
+
+ #### docker run with online license
+ sudo docker run -e LICENSE="xxxxx" -p 8080:8080 -p 9000:9000 kby-ai-live
+
+ #### docker run with offline license
+ sudo docker run -v ./license.txt:/root/kby-ai-live/license.txt -p 8080:8080 -p 9000:9000 kby-ai-live
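Once the container is up, the Flask service defined in app.py can be exercised directly. A minimal sketch, assuming the API is reachable on localhost:8080 and that test.jpg is a hypothetical local image, covering both endpoints the app exposes:

```python
import base64
import requests

# Multipart upload to /check_liveness (the same call demo.py makes).
with open("test.jpg", "rb") as f:  # hypothetical test image
    r = requests.post("http://127.0.0.1:8080/check_liveness", files={"file": f})
state = r.json()["face_state"]
print(state["result"], state["liveness_score"])

# JSON body with a base64-encoded image for /check_liveness_base64.
with open("test.jpg", "rb") as f:
    payload = {"base64": base64.b64encode(f.read()).decode("utf-8")}
r = requests.post("http://127.0.0.1:8080/check_liveness_base64", json=payload)
print(r.json()["face_state"]["result"])
```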
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ flask
+ flask-cors
+ Pillow
+ numpy
+ gradio==3.50.2
+ datadog_api_client
run.sh ADDED
@@ -0,0 +1,5 @@
+ #!/bin/bash
+
+ cd /root/kby-ai-live
+ exec python3 demo.py &
+ exec python3 app.py