sayedM committed on
Commit
5a1edc8
1 Parent(s): 5f687a2

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +175 -0
  2. best_upwork.onnx +3 -0
app.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import time
4
+ #import os
5
+ #import datetime
6
+ #from datetime import datetime
7
+ #from PIL import Image
8
+ #from io import BytesIO
9
+ #import requests
10
+ #from scipy import ndimage
11
+
12
+
13
# Network input size (the ONNX model is run on 320x320 images).
INPUT_WIDTH = 320
INPUT_HEIGHT = 320
# Minimum per-class score for a detection row to be kept.
SCORE_THRESHOLD = 0.45
# IoU threshold used by non-maximum suppression.
NMS_THRESHOLD = 0.45
# Minimum objectness confidence for a detection row to be considered at all.
CONFIDENCE_THRESHOLD = 0.5

# Text parameters.
FONT_FACE = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 0.7
THICKNESS = 1

# Colors (BGR order, as used by OpenCV).
BLACK = (0,0,0)
BLUE = (255,178,50)
YELLOW = (0,255,255)
# NOTE(review): classesFile and classes are never read in this file —
# presumably leftovers; the class lookup actually used is x_ below.
classesFile = "coco.names"
classes = None


# Character-detection model (YOLO-style ONNX), loaded once at import time.
ch_detection_modelWeights = "best_upwork.onnx"
ch_detection_model = cv2.dnn.readNet(ch_detection_modelWeights)
# Class-id -> character lookup: id 0 is '-', ids 1..10 are digits '0'..'9'.
x_=["-","0","1","2","3","4","5","6","7","8","9"]
35
+
36
+
37
+
38
+
39
+
40
def draw_label(im, label, x, y):
    """Render `label` at (x, y) on `im` over a filled black backdrop."""
    # Measure the rendered text so the backdrop rectangle fits it exactly.
    (text_w, text_h), baseline = cv2.getTextSize(label, FONT_FACE, FONT_SCALE, THICKNESS)
    # Filled rectangle keeps the text readable regardless of image content.
    cv2.rectangle(im, (x, y), (x + text_w, y + text_h + baseline), (0, 0, 0), cv2.FILLED)
    # Yellow anti-aliased text drawn inside the backdrop.
    cv2.putText(im, label, (x, y + text_h), FONT_FACE, FONT_SCALE, YELLOW, THICKNESS, cv2.LINE_AA)
49
def pre_process(input_image, net, w, h):
    """Run `net` on `input_image` resized to (w, h); return raw output tensors."""
    # Build the 4-D input blob: scale pixels by 1/255, resize to (w, h),
    # no mean subtraction; the 5th positional arg (1) enables R/B swapping.
    blob = cv2.dnn.blobFromImage(input_image, 1 / 255, (w, h), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    # Forward pass through every unconnected output layer.
    return net.forward(net.getUnconnectedOutLayersNames())
60
+
61
def get_xyxy(input_image, image_height, image_width, outputs, w, h):
    """Decode YOLO-style `outputs`, apply NMS, and draw surviving boxes.

    Args:
        input_image: image the surviving boxes are drawn onto (mutated, and
            written to 'x1.jpg' as a debug artifact).
        image_height, image_width: original image size, used to rescale boxes.
        outputs: raw network outputs from pre_process().
        w, h: network input width/height the image was resized to.

    Returns:
        (output_boxes, results_cls_id): NMS-surviving boxes as
        [left, top, right, bottom] int arrays, plus their class ids.
    """
    class_ids = []
    confidences = []
    boxes = []
    output_boxes = []
    results_cls_id = []

    # Number of candidate detection rows in the first output tensor.
    rows = outputs[0].shape[1]

    # Scale factors from network-input space back to original image space.
    x_factor = image_width / w
    y_factor = image_height / h

    for r in range(rows):
        row = outputs[0][0][r]
        confidence = row[4]  # objectness score
        if confidence >= CONFIDENCE_THRESHOLD:
            classes_scores = row[5:]
            class_id = np.argmax(classes_scores)
            if classes_scores[class_id] > SCORE_THRESHOLD:
                # float() keeps NMSBoxes happy across OpenCV versions.
                confidences.append(float(confidence))
                class_ids.append(class_id)
                # BUGFIX: use bw/bh — the original unpacked into w/h, shadowing
                # the function parameters of the same name.
                cx, cy, bw, bh = row[0], row[1], row[2], row[3]
                left = int((cx - bw / 2) * x_factor)
                top = int((cy - bh / 2) * y_factor)
                width = int(bw * x_factor)
                height = int(bh * y_factor)
                boxes.append(np.array([left, top, width, height]))

    # Nothing detected: skip NMS (some OpenCV builds raise on empty input).
    if not boxes:
        cv2.imwrite('x1.jpg', input_image)
        return output_boxes, results_cls_id

    # Non-maximum suppression to drop redundant overlapping boxes.
    indices = cv2.dnn.NMSBoxes(boxes, confidences, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)
    # Older OpenCV returns shape (N, 1); flatten so each i is a scalar index.
    for i in np.array(indices).flatten():
        box = boxes[i]
        left, top, width, height = box[0], box[1], box[2], box[3]
        results_cls_id.append(class_ids[i])

        cv2.rectangle(input_image, (left, top), (left + width, top + height), BLUE, 1)

        # Convert the stored box from (l, t, w, h) to (l, t, r, b) in place.
        boxes[i][2] = left + width
        boxes[i][3] = top + height
        output_boxes.append(boxes[i])

    cv2.imwrite('x1.jpg', input_image)
    return output_boxes, results_cls_id  # boxes are (left, top, right, bottom)
111
+
112
def char_det(input_image, ch_detection_model, w, h):
    """Detect characters in `input_image` with the given ONNX model.

    Returns the (boxes, class_ids) pair produced by get_xyxy(); as a side
    effect the surviving boxes are drawn onto `input_image`.
    """
    # Run the network on a copy so blob creation never touches the original.
    raw_outputs = pre_process(input_image.copy(), ch_detection_model, w, h)
    img_h, img_w = input_image.shape[0], input_image.shape[1]
    # Decode detections + NMS; also annotates input_image.
    return get_xyxy(input_image, img_h, img_w, raw_outputs, w, h)
124
+
125
def rearange_(array_pred, results_cls_id):
    """Order detections left-to-right and map class ids to characters.

    Sorts by each box's left coordinate (column 0 of `array_pred`) and looks
    each class id up in the module-level character table `x_`.
    """
    left_to_right = np.argsort(array_pred[:, 0])
    # Concatenate the per-box characters in reading order.
    return ''.join(x_[results_cls_id[i]] for i in left_to_right)
136
+
137
+
138
def main_func(img):
    """Detect and decode the character string in `img`.

    Args:
        img: input image (gradio supplies an RGB numpy array).

    Returns:
        (scores, annotated, seconds): the decoded character string
        ('' when nothing is detected), a copy of the input with detection
        boxes drawn, and the wall-clock processing time in seconds.
    """
    scores = ''  # original used 0 here; '' is the type the Textbox expects
    t1 = time.time()
    img = np.array(img)
    im2 = img.copy()

    # Pad the shorter side with black so the image is square before the
    # fixed 320x320 resize (preserves the content's aspect ratio).
    width_height_diff = img.shape[1] - img.shape[0]
    if width_height_diff > 0:
        # Wider than tall: pad the bottom.
        # BUGFIX: the color must be passed as value=...; positionally it
        # lands in copyMakeBorder's `dst` parameter, not the border color.
        img = cv2.copyMakeBorder(img, 0, width_height_diff, 0, 0,
                                 cv2.BORDER_CONSTANT, value=(0, 0, 0))
    elif width_height_diff < 0:
        # Taller than wide: pad the right.
        img = cv2.copyMakeBorder(img, 0, 0, 0, -width_height_diff,
                                 cv2.BORDER_CONSTANT, value=(0, 0, 0))

    cropped_chars_array, results_cls_id = char_det(img.copy(), ch_detection_model, 320, 320)
    if len(cropped_chars_array) != 0:
        cropped_chars_array = np.asarray(cropped_chars_array)
        scores = rearange_(cropped_chars_array, results_cls_id)
        # Boxes come back as (left, top, right, bottom) pixel coordinates.
        for x1, y1, x2, y2 in cropped_chars_array:
            cv2.rectangle(im2, (x1, y1), (x2, y2), BLUE, 1)

    time_of_process = time.time() - t1
    return scores, im2, time_of_process
166
+
167
+
168
import gradio as gr


def final_func():
    """Build and launch the Gradio demo UI wrapping main_func()."""
    demo = gr.Interface(
        fn=main_func,
        inputs=gr.Image(),
        outputs=[
            gr.Textbox(lines=1, label="Scores"),
            gr.Image(label="Image"),
            gr.Number(label="Time"),
        ],
    )
    demo.launch()


if __name__ == "__main__":
    final_func()
best_upwork.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9909e56eef03c83f2851a799db30444e623af3f843d87bad8d113664f707813
3
+ size 28317318