Upload 8 files

- .gitattributes +1 -0
- Model/keras_metadata.pb +3 -0
- Model/saved_model.pb +3 -0
- Model/variables/variables.data-00000-of-00001 +3 -0
- Model/variables/variables.index +0 -0
- cv_func.py +41 -0
- deploy_app.py +50 -0
- haarcascade_frontalface_default.xml +0 -0
- requirements.txt +4 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Model/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
Model/keras_metadata.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f207a1e2c0f42e257b372b2830feaddb4e8ba4c47ddb86237439c1ed266f46b2
+size 16505
Model/saved_model.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7d3645eb160aafdb2e193c1fc02939ab79f0793de83097898bb545ae8d2a6a4
+size 146234
Model/variables/variables.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2393a841d5bd4d584110ef69a6b0a7d36289e90100b714f2669d6ee98783502d
+size 1278565
Model/variables/variables.index
ADDED
Binary file (2.26 kB)
cv_func.py
ADDED
@@ -0,0 +1,41 @@
+import cv2
+import numpy as np
+import tensorflow as tf
+
+
+def predict(image):
+    # Load the saved Keras model from the "Model" directory and read the input image.
+    model = tf.keras.models.load_model("Model")
+    img = cv2.imread(image)
+
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+    # Detect faces with OpenCV's pre-trained Haar cascade.
+    haar_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+    faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=9)
+
+    men_count = 0
+    women_count = 0
+    for (x, y, w, h) in faces_rect:
+        # Crop each detected face and resize it to the model's 64x64 input size.
+        cropped_img = img[y:y+h, x:x+w]
+        resized_img = cv2.resize(cropped_img, (64, 64))
+        image_array = np.expand_dims(resized_img, axis=0)
+
+        prediction = model.predict(image_array)
+
+        if prediction[0][0] > 0.5:
+            ans = "Men"
+            men_count += 1
+        else:
+            ans = "Women"
+            women_count += 1
+
+        # Draw the bounding box and predicted label on the original image.
+        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), thickness=2)
+        cv2.putText(img, ans, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+    # Convert BGR -> RGB so the annotated image displays correctly in Streamlit.
+    rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+    return rgb_img, men_count, women_count
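
For a quick local check outside the Streamlit app, predict could be exercised directly along the lines of the sketch below ("sample.jpg" is only a placeholder path; the "Model" directory and haarcascade_frontalface_default.xml are assumed to sit in the working directory):

# Local sanity check for cv_func.predict (sketch only; "sample.jpg" is a placeholder).
import cv2

from cv_func import predict

annotated_rgb, men, women = predict("sample.jpg")
print(f"Detected {men} men and {women} women")

# predict() returns an RGB array, so convert back to BGR before saving with OpenCV.
cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated_rgb, cv2.COLOR_RGB2BGR))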
deploy_app.py
ADDED
@@ -0,0 +1,50 @@
+import cv2
+import time
+import tempfile
+import numpy as np
+import streamlit as st
+import tensorflow as tf
+from cv_func import predict
+
+st.title("Gender Detector using OpenCV and TensorFlow")
+
+uploader = st.file_uploader(label="Upload an image (jpg or png): ", type=["jpg", "png"])
+
+if uploader is None:
+    st.warning("⚠️| Hey there! Ready to unveil the mysteries? Upload an image and let's predict some genders!")
+
+if uploader is not None:
+    print("log update: file uploaded!")
+    st.write("⏳ | Brace yourselves! Our top-notch AI detectives are on the case....")
+    time.sleep(3)
+    st.write("⏳ | Analyzing the pixels to uncover the hidden secrets of gender in your image.....")
+    time.sleep(3)
+    st.write("⌛ | This could be the moment we crack the code or just end up with some hilariously unexpected results")
+    time.sleep(3)
+    st.write("✅ | Done!")
+
+    # Persist the uploaded file to disk so OpenCV can read it by path.
+    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
+        temp_file.write(uploader.read())
+        image_filename = temp_file.name
+
+    # Run face detection and gender classification on the saved image.
+    ans, men, women = predict(image_filename)
+
+    st.header("Output:")
+    col1, col2 = st.columns(2)
+
+    with col1:
+        st.header("Men:")
+        st.metric("Count", men)
+    with col2:
+        st.header("Women:")
+        st.metric("Count", women)
+
+    # Show the annotated (RGB) image returned by predict().
+    st.image(ans)
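
One thing worth noting: predict reloads the SavedModel on every upload. If that ever becomes a bottleneck, a possible refinement (a sketch, not part of this commit) would be to cache the model with Streamlit's st.cache_resource and pass it into predict:

# Optional caching sketch (not part of this commit): load the Keras model once
# per Streamlit session instead of inside every predict() call.
import streamlit as st
import tensorflow as tf

@st.cache_resource
def load_gender_model():
    # "Model" is the SavedModel directory added in this commit.
    return tf.keras.models.load_model("Model")

model = load_gender_model()
# predict() would then need to accept the loaded model as an argument
# rather than calling tf.keras.models.load_model itself.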
haarcascade_frontalface_default.xml
ADDED
The diff for this file is too large to render.
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+opencv-python
+tensorflow
+numpy
+streamlit
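
With these dependencies installed (for example via pip install -r requirements.txt), the app can be launched locally with streamlit run deploy_app.py.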