Spaces: Running
Justin Grammens committed
Commit 08e7ad4 • 1 Parent(s): 6955362
added in the eye segmentation code
app.py
CHANGED
@@ -1,6 +1,8 @@
 import gradio as gr
 from transformers import pipeline
 from PIL import Image
+import cv2
+import numpy as np

 # Function to classify the face shape
 def classify_face_shape(image):
@@ -71,7 +73,8 @@ def classify_eye_shape(image):
     pipe = pipeline("image-classification", model="justingrammens/eye-shape")

     # Run the pipeline on the uploaded image
-    output = pipe(image)
+    #output = pipe(image)
+    output = pipe("eye_regions.jpg") # use the eye_regions image instead

     print("Pipeline output for eye shape:", output)
     # Format the output to be compatible with gr.outputs.Label
@@ -84,7 +87,8 @@ def classify_eye_color(image):
     pipe = pipeline("image-classification", model="justingrammens/eye-color")

     # Run the pipeline on the uploaded image
-    output = pipe(image)
+    #output = pipe(image)
+    output = pipe("eye_regions.jpg") #use the eye_regions image instead

     print("Pipeline output for eye color:", output)
     # Format the output to be compatible with gr.outputs.Label
@@ -92,8 +96,15 @@ def classify_eye_color(image):

     return formatted_output

+
+def process_gradio_image(pil_image):
+    # Convert PIL image to NumPy array
+    image = np.array(pil_image)
+    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # Convert RGB (from PIL) to BGR (OpenCV default)
+    return image

 def classify_image_with_multiple_models(image):
+    create_eye_region(image)
     face_shape_result = classify_face_shape(image)
     age_result = classify_age(image)
     skin_type_result = classify_skin_type(image)
@@ -105,6 +116,54 @@ def classify_image_with_multiple_models(image):
     return face_shape_result, age_result, skin_type_result, acne_results, hair_color_results, eye_shape, eye_color


+def create_eye_region(image):
+    # Load the pre-trained face and eye detectors
+    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
+
+    image = process_gradio_image(image)
+    # Convert the image to grayscale
+    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+    # Detect faces in the image
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+
+    for (x, y, w, h) in faces:
+        # Draw a rectangle around the face
+        cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
+
+        # Region of Interest (ROI) for the face
+        roi_gray = gray[y:y + h, x:x + w]
+        roi_color = image[y:y + h, x:x + w]
+
+        # Detect eyes in the face ROI
+        eyes = eye_cascade.detectMultiScale(roi_gray)
+
+        for (ex, ey, ew, eh) in eyes:
+            # Draw a rectangle around the eyes
+            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
+
+            # Extract the eye region
+            eye_roi = roi_color[ey:ey + eh, ex:ex + ew]
+            cv2.imwrite('eye_regions.jpg', eye_roi)
+
+            # Calculate the average color of the eye region
+            avg_color = np.mean(eye_roi, axis=(0, 1))
+
+            # Classify eye color based on average color
+            if avg_color[0] > avg_color[1] and avg_color[0] > avg_color[2]:
+                color = "Brown"
+            elif avg_color[1] > avg_color[0] and avg_color[1] > avg_color[2]:
+                color = "Green"
+            else:
+                color = "Blue"
+
+            # Display the eye color
+            cv2.putText(image, color, (ex, ey - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+
+    cv2.imwrite('segmented_face.jpg', image)
+
+
 # Create the Gradio interface
 demo = gr.Interface(
     fn=classify_image_with_multiple_models, # The function to run