Gilad Avidan committed
Commit • 5f41ad3
Parent(s): 78cd05e
change output format
- __pycache__/handler.cpython-311.pyc +0 -0
- handler.py +24 -2
__pycache__/handler.cpython-311.pyc
CHANGED
Binary files a/__pycache__/handler.cpython-311.pyc and b/__pycache__/handler.cpython-311.pyc differ
handler.py
CHANGED
@@ -1,10 +1,26 @@
 import base64
+import gzip
+import numpy as np
 from io import BytesIO
 from typing import Dict, List, Any
 from PIL import Image
 import torch
 from transformers import SamModel, SamProcessor
 
+def pack_bits(boolean_tensor):
+    # Flatten the tensor and add padding if necessary
+    flat = boolean_tensor.flatten()
+    if flat.size()[0] % 8 != 0:
+        padding = np.zeros((8 - flat.size % 8,), dtype=bool)
+        flat = np.concatenate([flat, padding])
+
+    # Reshape into bytes and pack into binary string
+    packed = np.packbits(flat.reshape((-1, 8)))
+    packed = packed.tobytes()
+    return gzip.compress(packed)
+    # json_str = json.dumps({"shape": boolean_tensor.shape, "data": binary_str})
+
+
 class EndpointHandler():
     def __init__(self, path=""):
         # Preload all the elements you are going to need at inference.
@@ -30,7 +46,13 @@ class EndpointHandler():
 
         model_inputs = self.processor(image, input_points=input_points, return_tensors="pt").to(self.device)
         outputs = self.model(**model_inputs)
-        masks = self.processor.image_processor.post_process_masks(…)
+        masks = self.processor.image_processor.post_process_masks(
+            outputs.pred_masks.cpu(),
+            model_inputs["original_sizes"].cpu(),
+            model_inputs["reshaped_input_sizes"].cpu())
         scores = outputs.iou_scores
 
-        …
+        packed = [base64.b64encode(pack_bits(masks[0][0][i])).decode() for i in range(masks[0].shape[1])]
+        shape = list(masks[0].shape)[2:]
+
+        return {"masks": packed, "scores": scores[0][0].tolist(), "shape": shape}