initial commits
- app.py +58 -0
- data/example.jpeg +0 -0
- emirhan.tflite +0 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,58 @@
+import gradio as gr
+import cv2
+import numpy as np
+import mediapipe as mp
+from mediapipe.tasks import python
+from mediapipe.tasks.python import vision
+from mediapipe.python._framework_bindings import image as image_module
+_Image = image_module.Image
+from mediapipe.python._framework_bindings import image_frame
+_ImageFormat = image_frame.ImageFormat
+
+# Constants for colors
+BG_COLOR = (0, 0, 0, 255)  # black with full opacity
+MASK_COLOR = (255, 255, 255, 255)  # white with full opacity
+
+# Create the options that will be used for ImageSegmenter
+base_options = python.BaseOptions(model_asset_path='emirhan.tflite')
+options = vision.ImageSegmenterOptions(base_options=base_options,
+                                       output_category_mask=True)
+
+# Function to segment hair and generate mask
+def segment_hair(image):
+    rgba_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
+    rgba_image[:, :, 3] = 0  # Set alpha channel to empty
+
+    # Create MP Image object from numpy array
+    mp_image = _Image(image_format=_ImageFormat.SRGBA, data=rgba_image)
+
+    # Create the image segmenter
+    with vision.ImageSegmenter.create_from_options(options) as segmenter:
+        # Retrieve the masks for the segmented image
+        segmentation_result = segmenter.segment(mp_image)
+        category_mask = segmentation_result.category_mask
+
+        # Generate solid color images for showing the output segmentation mask.
+        image_data = mp_image.numpy_view()
+        fg_image = np.zeros(image_data.shape, dtype=np.uint8)
+        fg_image[:] = MASK_COLOR
+        bg_image = np.zeros(image_data.shape, dtype=np.uint8)
+        bg_image[:] = BG_COLOR
+
+        condition = np.stack((category_mask.numpy_view(),) * 4, axis=-1) > 0.2
+        output_image = np.where(condition, fg_image, bg_image)
+
+        return cv2.cvtColor(output_image, cv2.COLOR_RGBA2RGB)
+
+# Gradio interface
+iface = gr.Interface(
+    fn=segment_hair,
+    inputs=gr.inputs.Image(type="numpy"),
+    outputs=gr.outputs.Image(type="numpy"),
+    title="Hair Segmentation",
+    description="Upload an image to segment the hair and generate a mask.",
+    examples=["data/example.jpeg"]
+)
+
+if __name__ == "__main__":
+    iface.launch()
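The interface above is built with the legacy gr.inputs / gr.outputs namespaces, which Gradio deprecated in 3.x and removed in 4.0. Since requirements.txt installs an unpinned gradio, a minimal sketch of the same wiring against the current component API (assumed here, not part of this commit; it reuses segment_hair and data/example.jpeg from the file above) is:

import gradio as gr

# Sketch only: Gradio 4+ replaces gr.inputs.Image / gr.outputs.Image with gr.Image.
# Assumes segment_hair and data/example.jpeg from the committed app.py above.
iface = gr.Interface(
    fn=segment_hair,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Image(type="numpy"),
    title="Hair Segmentation",
    description="Upload an image to segment the hair and generate a mask.",
    examples=["data/example.jpeg"],
)

if __name__ == "__main__":
    iface.launch()

Either way, segment_hair(cv2.imread("data/example.jpeg")) can be called directly to check the output: it returns the white-on-black hair mask as an RGB numpy array, which is what the interface displays.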
data/example.jpeg
ADDED
emirhan.tflite
ADDED
Binary file (781 kB)
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+opencv-python-headless
+mediapipe
+numpy
+gradio
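As committed, none of these packages are pinned, so the Space installs whatever versions are current at build time. If app.py keeps the gr.inputs / gr.outputs calls as written, one option (the version bound below is an assumption, not part of this commit) is to pin Gradio below the release that removed those namespaces:

opencv-python-headless
mediapipe
numpy
gradio<4  # gr.inputs / gr.outputs were removed in Gradio 4; drop this pin if app.py moves to gr.Image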