Update app.py
app.py CHANGED
@@ -5,72 +5,72 @@ from transformers import CLIPProcessor, CLIPModel, YolosImageProcessor, YolosForObjectDetection
 import torch

 st.title("CLIP & CROP")
-st.markdown("**Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers, if the similarity score is not so much, then please consider the prediction to be void.**")
+# st.markdown("**Extract sections of images from your image by using OpenAI's CLIP and Facebooks Detr implemented on HuggingFace Transformers, if the similarity score is not so much, then please consider the prediction to be void.**")

-IMAGE_INPUT = st.file_uploader(type=["jpg", "png"], label="Input image")
-TEXT_INPUT = st.text_input(label="Description for section to extracted")
-NUMBER_INPUT = st.number_input(value=0.96, label="Threshold percentage score")
+# IMAGE_INPUT = st.file_uploader(type=["jpg", "png"], label="Input image")
+# TEXT_INPUT = st.text_input(label="Description for section to extracted")
+# NUMBER_INPUT = st.number_input(value=0.96, label="Threshold percentage score")


-with st.spinner("Models are loading"):
-    feature_extractor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
-    dmodel = YolosForObjectDetection.from_pretrained('hustvl/yolos-tiny')
+# with st.spinner("Models are loading"):
+# feature_extractor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
+# dmodel = YolosForObjectDetection.from_pretrained('hustvl/yolos-tiny')

-    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
-    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
+# model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
+# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")

-SUBMIT_BUTTON = st.button("SUBMIT")
+# SUBMIT_BUTTON = st.button("SUBMIT")

-def extract_image(image, text, prob, num=1):
+# def extract_image(image, text, prob, num=1):

-    inputs = feature_extractor(images=image, return_tensors="pt")
-    outputs = dmodel(**inputs)
+# inputs = feature_extractor(images=image, return_tensors="pt")
+# outputs = dmodel(**inputs)

-    # model predicts bounding boxes and corresponding COCO classes
-    logits = outputs.logits
-    bboxes = outputs.pred_boxes
-    probas = outputs.logits.softmax(-1)[0, :, :-1] #removing no class as detr maps
+# # model predicts bounding boxes and corresponding COCO classes
+# logits = outputs.logits
+# bboxes = outputs.pred_boxes
+# probas = outputs.logits.softmax(-1)[0, :, :-1] #removing no class as detr maps

-    keep = probas.max(-1).values > prob
-    outs = feature_extractor.post_process(outputs, torch.tensor(image.size[::-1]).unsqueeze(0))
-    bboxes_scaled = outs[0]['boxes'][keep].detach().numpy()
-    labels = outs[0]['labels'][keep].detach().numpy()
-    scores = outs[0]['scores'][keep].detach().numpy()
+# keep = probas.max(-1).values > prob
+# outs = feature_extractor.post_process(outputs, torch.tensor(image.size[::-1]).unsqueeze(0))
+# bboxes_scaled = outs[0]['boxes'][keep].detach().numpy()
+# labels = outs[0]['labels'][keep].detach().numpy()
+# scores = outs[0]['scores'][keep].detach().numpy()

-    images_list = []
-    for i,j in enumerate(bboxes_scaled):
+# images_list = []
+# for i,j in enumerate(bboxes_scaled):

-        xmin = int(j[0])
-        ymin = int(j[1])
-        xmax = int(j[2])
-        ymax = int(j[3])
+# xmin = int(j[0])
+# ymin = int(j[1])
+# xmax = int(j[2])
+# ymax = int(j[3])

-        im_arr = np.array(image)
-        roi = im_arr[ymin:ymax, xmin:xmax]
-        roi_im = Image.fromarray(roi)
+# im_arr = np.array(image)
+# roi = im_arr[ymin:ymax, xmin:xmax]
+# roi_im = Image.fromarray(roi)

-        images_list.append(roi_im)
+# images_list.append(roi_im)

-    inpu = processor(text = [text], images=images_list , return_tensors="pt", padding=True)
-    output = model(**inpu)
-    logits_per_image = output.logits_per_text
-    probs = logits_per_image.softmax(-1)
-    l_idx = np.argsort(probs[-1].detach().numpy())[::-1][0:num]
+# inpu = processor(text = [text], images=images_list , return_tensors="pt", padding=True)
+# output = model(**inpu)
+# logits_per_image = output.logits_per_text
+# probs = logits_per_image.softmax(-1)
+# l_idx = np.argsort(probs[-1].detach().numpy())[::-1][0:num]

-    final_ims = []
-    for i,j in enumerate(images_list):
-        json_dict = {}
-        if i in l_idx:
-            json_dict['image'] = images_list[i]
-            json_dict['score'] = probs[-1].detach().numpy()[i]
+# final_ims = []
+# for i,j in enumerate(images_list):
+# json_dict = {}
+# if i in l_idx:
+# json_dict['image'] = images_list[i]
+# json_dict['score'] = probs[-1].detach().numpy()[i]

-            final_ims.append(json_dict)
+# final_ims.append(json_dict)

-    fi = sorted(final_ims, key=lambda item: item.get("score"), reverse=True)
-    return fi[0]['image'], fi[0]['score']
+# fi = sorted(final_ims, key=lambda item: item.get("score"), reverse=True)
+# return fi[0]['image'], fi[0]['score']

-if SUBMIT_BUTTON:
-    imageOutput, scoreOutput = extract(IMAGE_INPUT, TEXT_INPUT, NUMBER_INPUT)
-    st.image(imageOutput, caption="Cropped Image")
-    st.markdown("*Confidence Score:*")
-    st.success(scoreOutput)
+# if SUBMIT_BUTTON:
+# imageOutput, scoreOutput = extract(IMAGE_INPUT, TEXT_INPUT, NUMBER_INPUT)
+# st.image(imageOutput, caption="Cropped Image")
+# st.markdown("*Confidence Score:*")
+# st.success(scoreOutput)
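
The commit comments the whole pipeline out rather than deleting it. For reference, the sketch below reconstructs that pipeline as a standalone script without Streamlit: YOLOS proposes candidate boxes, each crop is scored against the text query with CLIP, and the highest-scoring crop is returned. This is an illustrative reconstruction, not the app's exact code: it uses the current post_process_object_detection API in place of the deprecated post_process call above, and photo.jpg, the query text, and the 0.9 threshold are placeholder values. Note also that the commented-out handler calls extract(...) while the function is defined as extract_image(...); the sketch calls extract_image directly.

# Standalone reconstruction of the commented-out pipeline (illustrative sketch).
import numpy as np
import torch
from PIL import Image
from transformers import (CLIPModel, CLIPProcessor,
                          YolosForObjectDetection, YolosImageProcessor)

detector_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
detector = YolosForObjectDetection.from_pretrained("hustvl/yolos-tiny")
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")


def extract_image(image, text, prob, num=1):
    # 1. Detect candidate regions with YOLOS; keep boxes scoring above `prob`.
    inputs = detector_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = detector(**inputs)
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    detections = detector_processor.post_process_object_detection(
        outputs, threshold=prob, target_sizes=target_sizes)[0]

    # 2. Crop every surviving box out of the original image.
    crops = []
    for box in detections["boxes"].numpy():
        xmin, ymin, xmax, ymax = [int(v) for v in box]
        crops.append(image.crop((xmin, ymin, xmax, ymax)))
    if not crops:
        raise ValueError("No detections above the threshold; try a lower `prob`.")

    # 3. Score each crop against the text query with CLIP; return the best one.
    clip_inputs = clip_processor(text=[text], images=crops,
                                 return_tensors="pt", padding=True)
    with torch.no_grad():
        clip_outputs = clip_model(**clip_inputs)
    scores = clip_outputs.logits_per_text.softmax(-1)[0].numpy()
    order = np.argsort(scores)[::-1][:num]
    return crops[order[0]], float(scores[order[0]])


if __name__ == "__main__":
    img = Image.open("photo.jpg").convert("RGB")          # placeholder input image
    crop, score = extract_image(img, "a dog", prob=0.9)   # placeholder query and threshold
    crop.save("cropped.jpg")
    print("CLIP similarity score:", score)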