Spaces: mrneuralnet
committed • Commit 4142b1b • 1 Parent(s): 8b1ecb4
Upload 14 files
Browse files:
- .gitattributes +4 -34
- .gitignore +4 -0
- ComVis_Tech_Assessment_Kecilin_Training.ipynb +0 -0
- README.md +1 -12
- app.py +98 -0
- pipelines.py +201 -0
- requirements.txt +5 -0
- sample_files/ambulance-1.jpg +3 -0
- sample_files/ambulance-2.jpeg +3 -0
- sample_files/ambulance.mp4 +3 -0
- sample_files/trucks-1.jpeg +3 -0
- sample_files/trucks-2.jpeg +3 -0
- sample_files/trucks.mp4 +3 -0
- utils.py +101 -0
.gitattributes
CHANGED
@@ -1,35 +1,5 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.torchscript filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,4 @@
+__pycache__/
+temps/*
+runs/*
+.ipynb_checkpoints/
ComVis_Tech_Assessment_Kecilin_Training.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
README.md
CHANGED
@@ -1,12 +1 @@
----
-title: Tech Assessment Kecilin
-emoji: ⚡
-colorFrom: purple
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.32.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+"# tech-assessment-ai-engineer"
app.py
ADDED
@@ -0,0 +1,98 @@
+import base64
+import json
+import os, shutil
+import re
+import time
+import uuid
+
+import cv2
+
+import numpy as np
+import streamlit as st
+from PIL import Image
+# from extract_video import extract_method_single_video
+
+from utils import st_file_selector, img2base64
+from pipelines import ImagePipeline, VideoPipeline
+
+DEBUG = True
+
+def main():
+    st.markdown("###")
+    uploaded_file = st.file_uploader('Upload a picture', type=['mp4', 'jpg', 'jpeg', 'png'], accept_multiple_files=False)
+
+    with st.spinner('Loading samples...'):
+        while not os.path.isdir("sample_files"):
+            time.sleep(1)
+    st.markdown("### or")
+    selected_file = st_file_selector(st, path='sample_files', key='selected', label='Choose a sample image/video')
+
+    if uploaded_file:
+        random_id = uuid.uuid1()
+        base_folder = "temps"
+        filename = "{}.{}".format(random_id, uploaded_file.type.split("/")[-1])
+        file_type = uploaded_file.type.split("/")[0]
+        filepath = f"{base_folder}/{filename}"
+        faces_folder = f"{base_folder}/images/{random_id}"
+        st.write(filepath)
+        if uploaded_file.type == 'video/mp4':
+            with open(f"temps/{filename}", mode='wb') as f:
+                f.write(uploaded_file.read())
+            video_path = filepath
+            st.video(uploaded_file)
+        else:
+            img = Image.open(uploaded_file).convert('RGB')
+            ext = uploaded_file.type.split("/")[-1]
+            st.image(img)
+    elif selected_file:
+        base_folder = "sample_files"
+        file_type = selected_file.split(".")[-1]
+        filename = selected_file.split("/")[-1]
+        filepath = f"{base_folder}/{selected_file}"
+
+        if file_type == 'mp4':
+            video_file = open(filepath, 'rb')
+            video_bytes = video_file.read()
+            st.video(video_bytes)
+            video_path = filepath
+        else:
+            img = Image.open(filepath).convert('RGB')
+            st.image(img)
+    else:
+        return
+
+    with st.spinner(f'Analyzing {file_type}...'):
+        if file_type == 'video' or file_type == 'mp4':
+            result = video_pipeline(video_path)
+        else:
+            result = image_pipeline({'images': [img2base64(np.array(img))]})
+
+    if 'found' in result['message']:
+        st.success(result['message'], icon="✅")
+    else:
+        st.error(result['message'], icon="🚨")
+
+    st.divider()
+    st.write('## Response JSON')
+    st.write(result)
+
+
+def setup():
+    if not os.path.isdir("temps"):
+        os.makedirs("temps")
+
+if __name__ == "__main__":
+    image_pipeline = ImagePipeline()
+    video_pipeline = VideoPipeline()
+
+    # with st.sidebar:
+    st.title("Face Fake Detection")
+    setup()
+    main()
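Outside the Streamlit app, the same model can be exercised by calling ImagePipeline directly. A minimal sketch, assuming weights/best.torchscript and the sample files from this commit are present locally:

    import cv2

    from pipelines import ImagePipeline
    from utils import img2base64

    pipeline = ImagePipeline()  # loads weights/best.torchscript on CPU

    # The pipeline expects base64-encoded image bytes under the 'images' key,
    # exactly as app.py builds the payload.
    img = cv2.imread("sample_files/ambulance-1.jpg")
    response = pipeline({'images': [img2base64(img)]})

    print(response['message'])  # "An ambulance is found" or "There is no ambulance"
    for det in response['results']:
        print(det['class'], det['confidence'], det['xywhn'])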
pipelines.py
ADDED
@@ -0,0 +1,201 @@
+import cv2
+
+from PIL import Image
+from ultralytics import YOLO
+
+from utils import readb64, img2base64
+
+model_int8 = YOLO('weights/best.torchscript', task='detect')
+
+# Class-id to name mapping, used by the standalone helpers below.
+labels = {
+    0: 'ambulance',
+    1: 'truck'
+}
+
+
+def inference_on_image(path):
+    results = model_int8(path)
+
+    # cv2.imread takes an IMREAD_* flag, not a color-conversion code;
+    # the image stays BGR, which is what cv2.imshow expects.
+    img = cv2.imread(path)
+    for box in results[0].boxes:
+        cls = box.cls.item()
+        confidence = box.conf.item()
+        label = labels[cls]
+
+        x1, y1, x2, y2 = map(int, list(box.xyxy.numpy()[0]))
+        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 102, 255), 2)
+        img = cv2.rectangle(img, (x1, y1 - 20), (x2, y1), (0, 102, 255), -1)
+        img = cv2.putText(img, "{}: {:.3f}".format(label, confidence), (x1, y1 - 5),
+                          cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
+
+    cv2.imshow('Detected Image', img)
+    cv2.waitKey(0)
+
+    return results
+
+def inference_on_video(path, vid_stride=10):
+    results = model_int8(path, vid_stride=vid_stride, stream=True)
+
+    cap = cv2.VideoCapture(path)
+    ret, img = cap.read()
+
+    frame_counter = 0
+    while True:
+        ret, img = cap.read()
+        if ret:
+            # The streamed generator yields one result per vid_stride frames.
+            if frame_counter % vid_stride == 0:
+                result = next(results)
+                for box in result.boxes:
+                    cls = box.cls.item()
+                    confidence = box.conf.item()
+                    label = labels[cls]
+
+                    x1, y1, x2, y2 = map(int, list(box.xyxy.numpy()[0]))
+                    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 102, 255), 2)
+                    img = cv2.rectangle(img, (x1, y1 - 20), (x2, y1), (0, 102, 255), -1)
+                    img = cv2.putText(img, "{}: {:.3f}".format(label, confidence), (x1, y1 - 5),
+                                      cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
+        else:
+            cap.release()
+            break
+
+        cv2.imshow('Detected Image', img)
+        frame_counter += 1
+
+        k = cv2.waitKey(5) & 0xFF
+        if k == 27:  # Esc exits early
+            cap.release()
+            cv2.destroyAllWindows()
+            break
+
+    return results
+
+
+class ImagePipeline:
+    def __init__(self, device='cpu', gpu_id=0, weights='weights/best.torchscript'):
+        self.model = YOLO(weights, task='detect')
+
+    def preprocess(self, data):
+        image_base64 = data.pop("images", data)
+
+        if not isinstance(image_base64, list):
+            image_base64 = [image_base64]
+        elif len(image_base64) > 1:
+            raise Exception("ImagePipeline only accepts 1 image/frame")
+
+        images = [readb64(image) for image in image_base64]
+        return images
+
+    def inference(self, images):
+        results = self.model(images[0])
+        return results
+
+    def get_response(self, inference_result):
+        response = []
+
+        if 0 in inference_result[0].boxes.cls.numpy():  # class 0 = ambulance
+            message = "An ambulance is found"
+        else:
+            message = "There is no ambulance"
+
+        for i, result in enumerate(inference_result):
+            for xywhn, cls, conf in zip(
+                result.boxes.xywhn,
+                result.boxes.cls,
+                result.boxes.conf
+            ):
+                xywhn = list(xywhn.numpy())
+                response.append({
+                    'xywhn': {
+                        'x': float(xywhn[0]),
+                        'y': float(xywhn[1]),
+                        'w': float(xywhn[2]),
+                        'h': float(xywhn[3]),
+                    },
+                    'class': cls.item(),
+                    'confidence': conf.item(),
+                })
+
+        return {'results': response,
+                'message': message}
+
+    def __call__(self, data, config_payload=None):
+        images = self.preprocess(data)
+        inference_result = self.inference(images)
+        response = self.get_response(inference_result)
+        return response
+
+class VideoPipeline:
+    def __init__(self, device='cpu', gpu_id=0, weights='weights/best.torchscript'):
+        self.model = YOLO(weights, task='detect')
+
+    def preprocess(self, data):
+        return data
+
+    def inference(self, video_path, vid_stride=30):
+        results = self.model(video_path, vid_stride=vid_stride)
+        return results
+
+    def get_response(self, inference_result):
+        response = []
+
+        # default message
+        message = "There is no ambulance"
+
+        for i, result in enumerate(inference_result):
+            if 0 in result.boxes.cls.numpy():
+                message = "An ambulance is found"
+
+            for xywhn, cls, conf in zip(
+                result.boxes.xywhn,
+                result.boxes.cls,
+                result.boxes.conf
+            ):
+                xywhn = list(xywhn.numpy())
+                response.append({
+                    'xywhn': {
+                        'x': float(xywhn[0]),
+                        'y': float(xywhn[1]),
+                        'w': float(xywhn[2]),
+                        'h': float(xywhn[3]),
+                    },
+                    'class': cls.item(),
+                    'confidence': conf.item(),
+                })
+
+        return {'results': response,
+                'message': message}
+
+    def __call__(self, data, config_payload=None):
+        data = self.preprocess(data)
+        inference_result = self.inference(data)
+        response = self.get_response(inference_result)
+        return response
+
+
+if __name__ == '__main__':
+    import argparse
+
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('--input_type',
+                        default='image',
+                        const='image',
+                        nargs='?',
+                        choices=['image', 'video'],
+                        help='type of input (default: %(default)s)')
+    parser.add_argument("-p", "--path", help="filepath")
+    args = parser.parse_args()
+
+    if args.input_type == 'image':
+        results = inference_on_image(args.path)
+    elif args.input_type == 'video':
+        results = inference_on_video(args.path)
+
+    print(results)
+
+
+# Examples
+# python pipelines.py --input_type image --path sample_files/ambulance-2.jpeg
+# python pipelines.py --input_type video --path sample_files/ambulance.mp4
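VideoPipeline follows the same call pattern, but on a file path instead of a base64 payload. A short sketch of the call and of the response shape both pipelines return (the values below are illustrative, not actual model output):

    from pipelines import VideoPipeline

    video_pipeline = VideoPipeline()
    response = video_pipeline("sample_files/ambulance.mp4")  # samples every 30th frame by default

    # Illustrative shape of the returned dict:
    # {'results': [{'xywhn': {'x': 0.52, 'y': 0.48, 'w': 0.31, 'h': 0.40},  # normalized center + size
    #               'class': 0.0,            # 0 = ambulance, 1 = truck
    #               'confidence': 0.87}],
    #  'message': 'An ambulance is found'}
    print(response['message'])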
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+torch==2.2.1
+ultralytics==8.0.186
+streamlit
+pillow==10.2.0
+opencv-python-headless
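A quick way to confirm the pinned versions resolved as expected; a sketch only, assuming the file above was installed with pip:

    import PIL
    import cv2
    import streamlit
    import torch
    import ultralytics

    print(torch.__version__)        # pinned: 2.2.1
    print(ultralytics.__version__)  # pinned: 8.0.186
    print(PIL.__version__)          # pinned: 10.2.0
    print(streamlit.__version__)    # unpinned
    print(cv2.__version__)          # opencv-python-headless, unpinned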
sample_files/ambulance-1.jpg
ADDED
Git LFS Details
sample_files/ambulance-2.jpeg
ADDED
Git LFS Details
sample_files/ambulance.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f69b30105bad124cc434152707ea3d3ff238f4a9fcb8f2b3774cb2195d5a52c
+size 22860129
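These three lines are a Git LFS pointer, not the video itself: the actual bytes live in LFS storage, addressed by the sha256 oid, with size giving the byte count. A minimal parsing sketch (parse_lfs_pointer is a hypothetical helper, not part of this repo, and only sees pointer text when the checkout was made without LFS smudging):

    def parse_lfs_pointer(text):
        # Each pointer line is "key value": version, oid sha256:<hex>, size <bytes>.
        return dict(line.split(" ", 1) for line in text.strip().splitlines())

    fields = parse_lfs_pointer(open("sample_files/ambulance.mp4").read())
    print(fields["oid"], fields["size"])  # sha256:2f69b3... 22860129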
sample_files/trucks-1.jpeg
ADDED
Git LFS Details
sample_files/trucks-2.jpeg
ADDED
Git LFS Details
sample_files/trucks.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:072f5936821029eab4a1a4866a67df872bfa5b4fa1a51ca7793a4e787435901c
+size 7029598
utils.py
ADDED
@@ -0,0 +1,101 @@
+import torch
+import numpy as np
+import cv2
+import tempfile, base64
+
+
+def readb64(uri):
+    encoded_data = uri.split(',')[-1]
+    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
+    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+    return img
+
+def img2base64(img, extension="jpg"):
+    _, img_encoded = cv2.imencode(f".{extension}", img)
+    img_base64 = base64.b64encode(img_encoded)
+    img_base64 = img_base64.decode('utf-8')
+    return img_base64
+
+def binary2video(video_binary):
+    # byte_arr = BytesIO()
+    # byte_arr.write(video_binary)
+
+    temp_ = tempfile.NamedTemporaryFile(suffix='.mp4')
+    # decoded_string = base64.b64decode(video_binary)
+
+    temp_.write(video_binary)
+    video_capture = cv2.VideoCapture(temp_.name)
+    ret, frame = video_capture.read()
+    return video_capture
+
+def extract_frames(data_path, interval=30, max_frames=50):
+    """Method to extract frames"""
+    cap = cv2.VideoCapture(data_path)
+    frame_num = 0
+    frames = list()
+
+    while cap.isOpened():
+        success, image = cap.read()
+        if not success:
+            break
+        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+        # image = torch.tensor(image) - torch.tensor([104, 117, 123])
+        if frame_num % interval == 0:
+            frames.append(image)
+        frame_num += 1
+        if len(frames) > max_frames:
+            break
+    cap.release()
+    # if len(frames) > max_frames:
+    #     samples = np.random.choice(
+    #         np.arange(0, len(frames)), size=max_frames, replace=False)
+    #     return [frames[_] for _ in samples]
+    return frames
+
+"""FilePicker for streamlit.
+Still doesn't seem to be a good solution for a way to select files to process from the server Streamlit is running on.
+Here's a pretty functional solution.
+Usage:
+```
+import streamlit as st
+from filepicker import st_file_selector
+tif_file = st_file_selector(st, key='tif', label='Choose tif file')
+```
+"""
+
+import os
+import streamlit as st
+
+def update_dir(key):
+    choice = st.session_state[key]
+    if os.path.isdir(os.path.join(st.session_state[key+'curr_dir'], choice)):
+        st.session_state[key+'curr_dir'] = os.path.normpath(os.path.join(st.session_state[key+'curr_dir'], choice))
+        files = sorted(os.listdir(st.session_state[key+'curr_dir']))
+        if "images" in files:
+            files.remove("images")
+        st.session_state[key+'files'] = files
+
+def st_file_selector(st_placeholder, path='.', label='Select a file/folder', key='selected'):
+    if key+'curr_dir' not in st.session_state:
+        base_path = '.' if path is None or path == '' else path
+        base_path = base_path if os.path.isdir(base_path) else os.path.dirname(base_path)
+        base_path = '.' if base_path is None or base_path == '' else base_path
+
+        files = sorted(os.listdir(base_path))
+        files.insert(0, 'Choose a file...')
+        if "images" in files:
+            files.remove("images")
+        st.session_state[key+'files'] = files
+        st.session_state[key+'curr_dir'] = base_path
+    else:
+        base_path = st.session_state[key+'curr_dir']
+
+    selected_file = st_placeholder.selectbox(label=label,
+                                             options=st.session_state[key+'files'],
+                                             key=key,
+                                             on_change=lambda: update_dir(key))
+
+    if selected_file == "Choose a file...":
+        return None
+
+    return selected_file
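The two base64 helpers are inverses up to JPEG compression; a small round-trip sketch, assuming a sample image from this commit is present:

    import cv2

    from utils import img2base64, readb64

    img = cv2.imread("sample_files/ambulance-1.jpg")  # BGR uint8 array
    b64 = img2base64(img)                             # base64 JPEG string, no data-URI prefix
    decoded = readb64(b64)                            # split(',')[-1] also tolerates a data-URI prefix

    print(img.shape == decoded.shape)                 # True; pixel values may differ slightly (JPEG is lossy)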