omarhkh committed on
Commit
5eb1119
1 Parent(s): df5cff8

Update app.py

Files changed (1)
  1. app.py +35 -7
app.py CHANGED
@@ -75,12 +75,12 @@ def detect_objects(model_name,url_input,image_input,threshold):
 
     return viz_img
 
-def detect_objects2(model_name,url_input,image_input,threshold):
+def detect_objects2(model_name,url_input,image_input,threshold,type2):
 
     #Extract model and feature extractor
     feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
 
-
+    xxresult=0
 
     model = DetrForObjectDetection.from_pretrained(model_name)
 
@@ -97,8 +97,33 @@ def detect_objects2(model_name,url_input,image_input,threshold):
     viz_img = visualize_prediction(image, processed_outputs, threshold, model.config.id2label)
 
     keep = processed_outputs["scores"] > threshold
+    det_lab = processed_outputs["labels"][keep].tolist()
+    det_lab.count(1)
+    if det_lab.count(1) > 0:
+        total_text="Trench is Detected \n"
+    else:
+        total_text="Trench is NOT Detected \n"
+        xxresult=1
+
 
-    return processed_outputs["labels"][keep].tolist()
+    if det_lab.count(4) > 0:
+        total_text+="Measuring Tape (Vertical) for measuring Depth is Detected \n"
+    else:
+        total_text+="Measuring Tape (Vertical) for measuring Depth is NOT Detected \n"
+        if type2=="Trench Depth Measurement":
+            xxresult=1
+
+    if det_lab.count(5) > 0:
+        total_text+="Measuring Tape (Horizontal) for measuring Width is Detected \n"
+    else:
+        total_text+="Measuring Tape (Horizontal) for measuring Width is NOT Detected \n"
+        if type2=="Trench Width Measurement":
+            xxresult=1
+    if xxresult==0:
+        text2 = "The photo is ACCEPTED"
+    else:
+        text2 = "The photo is NOT ACCEPTED"
+    return total_text, text2
 
 def set_example_image(example: list) -> dict:
     return gr.Image.update(value=example[0])
@@ -118,7 +143,7 @@ Links to HuggingFace Models:
 """
 
 models = ["omarhkh/detr-finetuned-omar8"]
-
+types_class = ["Trench Depth Measurement", "Trench Width Measurement"]
 
 css = '''
 h1#title {
@@ -135,6 +160,7 @@ with demo:
 
 
     options = gr.Dropdown(choices=models,label='Select Object Detection Model',show_label=True)
+    options2 = gr.Dropdown(choices=types_class,label='Select Classification Type',show_label=True)
     slider_input = gr.Slider(minimum=0.1,maximum=1,value=0.7,label='Prediction Threshold')
 
     with gr.Tabs():
@@ -151,10 +177,12 @@ with demo:
         img_but = gr.Button('Detect')
 
     with gr.Blocks():
-        name = gr.Textbox(label="Name")
-        output = gr.Textbox(label="Results")
+        name = gr.Textbox(label="Final Result")
+        output = gr.Textbox(label="Reason for the results")
         greet_btn = gr.Button("Results")
-        greet_btn.click(fn=detect_objects2, inputs=[options,img_input,img_input,slider_input], outputs=output, queue=True)
+        greet_btn.click(fn=detect_objects2[0], inputs=[options,img_input,img_input,slider_input,options2], outputs=output, queue=True)
+        name.change(fn=detect_objects2[1], inputs=[options,img_input,img_input,slider_input,options2], outputs=name, queue=True)
+
 
 
 
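The hunks above show only the tail of detect_objects2; the inference code that builds processed_outputs (image loading, feature extraction, the forward pass and post-processing) is unchanged and therefore not part of the diff. For orientation, a dict with "scores" and "labels" keys like the one indexed above is typically produced with the transformers DETR API roughly as follows. This is a minimal sketch under that assumption; run_detr is a hypothetical helper, not a function from app.py.

import torch
from PIL import Image
from transformers import AutoFeatureExtractor, DetrForObjectDetection

def run_detr(model_name, image):
    # Hypothetical helper illustrating the usual transformers DETR flow;
    # the exact code inside detect_objects2 is not shown in this diff.
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
    model = DetrForObjectDetection.from_pretrained(model_name)

    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Rescale boxes to the original image size; the result is a dict with
    # "scores", "labels" and "boxes" tensors (post_process_object_detection
    # plays the same role in newer transformers releases).
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    return feature_extractor.post_process(outputs, target_sizes)[0]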
 
 
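The logic added in the second hunk amounts to an acceptance rule: the photo is accepted only when a trench (label id 1) is among the kept detections and the measuring tape required by the selected classification type is also present, id 4 (vertical tape, depth) for "Trench Depth Measurement" and id 5 (horizontal tape, width) for "Trench Width Measurement". The same rule in condensed form, as a hypothetical helper that is not part of the commit (unlike the committed code, it reports only the tape relevant to the selected type):

# Label ids follow the checks in detect_objects2: 1 = trench,
# 4 = vertical measuring tape (depth), 5 = horizontal measuring tape (width).
REQUIRED_TAPE = {
    "Trench Depth Measurement": (4, "Measuring Tape (Vertical) for measuring Depth"),
    "Trench Width Measurement": (5, "Measuring Tape (Horizontal) for measuring Width"),
}

def classify_photo(det_lab, type2):
    lines = []
    accepted = True

    if det_lab.count(1) > 0:
        lines.append("Trench is Detected")
    else:
        lines.append("Trench is NOT Detected")
        accepted = False

    tape_id, tape_name = REQUIRED_TAPE[type2]
    if det_lab.count(tape_id) > 0:
        lines.append(tape_name + " is Detected")
    else:
        lines.append(tape_name + " is NOT Detected")
        accepted = False

    verdict = "The photo is ACCEPTED" if accepted else "The photo is NOT ACCEPTED"
    return "\n".join(lines), verdict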
 
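In the last hunk the two Textboxes are intended to receive the two strings that detect_objects2 now returns (total_text and text2). A Python function object cannot be indexed, so expressions such as detect_objects2[0] raise a TypeError; the usual Gradio pattern for a function with two return values is a single event handler that lists both components in outputs, which are then filled positionally. A sketch of that wiring, assuming it slots into the existing with demo: block and reuses the components defined earlier in app.py (illustration only, not the committed code):

with gr.Blocks():
    name = gr.Textbox(label="Final Result")
    output = gr.Textbox(label="Reason for the results")
    greet_btn = gr.Button("Results")
    # detect_objects2 returns (total_text, text2); Gradio assigns them to
    # the listed output components in order.
    greet_btn.click(
        fn=detect_objects2,
        inputs=[options, img_input, img_input, slider_input, options2],
        outputs=[output, name],
        queue=True,
    )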