Spaces:
Sleeping
Sleeping
yuanze1024
committed on
Commit
•
d89efd0
1
Parent(s):
ff678a2
change the user in dockerfile
Browse files
- Dockerfile +6 -1
- app.py +4 -3
- feature_extractors/uni3d_embedding_encoder.py +12 -6
Dockerfile
CHANGED
@@ -3,8 +3,13 @@ FROM nvcr.io/nvidia/pytorch:23.08-py3
|
|
3 |
LABEL maintainer="yuanze"
|
4 |
LABEL email="[email protected]"
|
5 |
|
|
|
|
|
|
|
6 |
WORKDIR /code
|
7 |
|
|
|
|
|
8 |
# Install webp support
|
9 |
RUN apt update && apt install libwebp-dev -y
|
10 |
|
@@ -24,6 +29,6 @@ COPY ./change_setup.txt /code/Pointnet2_PyTorch/pointnet2_ops_lib/setup.py
|
|
24 |
RUN cd Pointnet2_PyTorch/pointnet2_ops_lib \
|
25 |
&& pip install .
|
26 |
|
27 |
-
COPY .
|
28 |
|
29 |
CMD ["python", "app.py"]
|
|
|
3 |
LABEL maintainer="yuanze"
|
4 |
LABEL email="[email protected]"
|
5 |
|
6 |
+
RUN useradd -m -u 1000 user
|
7 |
+
USER user
|
8 |
+
|
9 |
WORKDIR /code
|
10 |
|
11 |
+
RUN chown -R user:user /code
|
12 |
+
|
13 |
# Install webp support
|
14 |
RUN apt update && apt install libwebp-dev -y
|
15 |
|
|
|
29 |
RUN cd Pointnet2_PyTorch/pointnet2_ops_lib \
|
30 |
&& pip install .
|
31 |
|
32 |
+
COPY --chown=user:user . /code
|
33 |
|
34 |
CMD ["python", "app.py"]
|
app.py
CHANGED
@@ -10,13 +10,14 @@ from feature_extractors.uni3d_embedding_encoder import Uni3dEmbeddingEncoder
|
|
10 |
MAX_BATCH_SIZE = 16
|
11 |
MAX_QUEUE_SIZE = 10
|
12 |
MAX_K_RETRIEVAL = 20
|
|
|
13 |
|
14 |
-
encoder = Uni3dEmbeddingEncoder()
|
15 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
16 |
source_id_list = torch.load("data/source_id_list.pt")
|
17 |
source_to_id = {source_id: i for i, source_id in enumerate(source_id_list)}
|
18 |
-
dataset = load_dataset("VAST-AI/LD-T3D", name=f"rendered_imgs_diag_above", split="base")
|
19 |
-
relation = load_dataset("VAST-AI/LD-T3D", split="full")
|
20 |
|
21 |
@functools.lru_cache()
|
22 |
def get_embedding(option, modality, angle=None):
|
|
|
10 |
MAX_BATCH_SIZE = 16
|
11 |
MAX_QUEUE_SIZE = 10
|
12 |
MAX_K_RETRIEVAL = 20
|
13 |
+
cache_dir = "./.cache"
|
14 |
|
15 |
+
encoder = Uni3dEmbeddingEncoder(cache_dir)
|
16 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
17 |
source_id_list = torch.load("data/source_id_list.pt")
|
18 |
source_to_id = {source_id: i for i, source_id in enumerate(source_id_list)}
|
19 |
+
dataset = load_dataset("VAST-AI/LD-T3D", name=f"rendered_imgs_diag_above", split="base", cache_dir=cache_dir)
|
20 |
+
relation = load_dataset("VAST-AI/LD-T3D", split="full", cache_dir=cache_dir)
|
21 |
|
22 |
@functools.lru_cache()
|
23 |
def get_embedding(option, modality, angle=None):
|
feature_extractors/uni3d_embedding_encoder.py
CHANGED
@@ -279,17 +279,23 @@ def create_uni3d(uni3d_path):
|
|
279 |
return model
|
280 |
|
281 |
class Uni3dEmbeddingEncoder(FeatureExtractor):
|
282 |
-
def __init__(self, cache_dir
|
283 |
bpe_path = "utils/bpe_simple_vocab_16e6.txt.gz"
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
|
|
|
|
|
|
288 |
hf_hub_download("timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k", "open_clip_pytorch_model.bin",
|
289 |
-
|
290 |
|
291 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
292 |
self.tokenizer = SimpleTokenizer(bpe_path)
|
|
|
|
|
|
|
293 |
self.clip_model, _, self.preprocess = open_clip.create_model_and_transforms(model_name="EVA02-E-14-plus", pretrained=clip_path)
|
294 |
self.clip_model.to(self.device)
|
295 |
|
|
|
279 |
return model
|
280 |
|
281 |
class Uni3dEmbeddingEncoder(FeatureExtractor):
|
282 |
+
def __init__(self, cache_dir, **kwargs) -> None:
|
283 |
bpe_path = "utils/bpe_simple_vocab_16e6.txt.gz"
|
284 |
+
# uni3d_path = os.path.join(cache_dir, "Uni3D", "modelzoo", "uni3d-g", "model.pt") # concat the subfolder as hf_hub_download will put it here
|
285 |
+
clip_path = os.path.join(cache_dir, "Uni3D", "open_clip_pytorch_model.bin")
|
286 |
+
|
287 |
+
# if not os.path.exists(uni3d_path):
|
288 |
+
# hf_hub_download("BAAI/Uni3D", "model.pt", subfolder="modelzoo/uni3d-g", cache_dir=cache_dir,
|
289 |
+
# local_dir=cache_dir + os.sep + "Uni3D")
|
290 |
+
if not os.path.exists(clip_path):
|
291 |
hf_hub_download("timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k", "open_clip_pytorch_model.bin",
|
292 |
+
cache_dir=cache_dir, local_dir=cache_dir + os.sep + "Uni3D")
|
293 |
|
294 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
295 |
self.tokenizer = SimpleTokenizer(bpe_path)
|
296 |
+
# self.model = create_uni3d(uni3d_path)
|
297 |
+
# self.model.eval()
|
298 |
+
# self.model.to(self.device)
|
299 |
self.clip_model, _, self.preprocess = open_clip.create_model_and_transforms(model_name="EVA02-E-14-plus", pretrained=clip_path)
|
300 |
self.clip_model.to(self.device)
|
301 |
|