Spaces:
Runtime error
Runtime error
Commit
·
ff678a2
1
Parent(s):
935914f
remove local cache
Browse files
- app.py +3 -4
- feature_extractors/uni3d_embedding_encoder.py +6 -12
app.py
CHANGED
@@ -10,14 +10,13 @@ from feature_extractors.uni3d_embedding_encoder import Uni3dEmbeddingEncoder
|
|
10 |
MAX_BATCH_SIZE = 16
|
11 |
MAX_QUEUE_SIZE = 10
|
12 |
MAX_K_RETRIEVAL = 20
|
13 |
-
cache_dir = "."
|
14 |
|
15 |
-
encoder = Uni3dEmbeddingEncoder(
|
16 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
17 |
source_id_list = torch.load("data/source_id_list.pt")
|
18 |
source_to_id = {source_id: i for i, source_id in enumerate(source_id_list)}
|
19 |
-
dataset = load_dataset("VAST-AI/LD-T3D", name=f"rendered_imgs_diag_above", split="base"
|
20 |
-
relation = load_dataset("VAST-AI/LD-T3D", split="full"
|
21 |
|
22 |
@functools.lru_cache()
|
23 |
def get_embedding(option, modality, angle=None):
|
|
|
10 |
MAX_BATCH_SIZE = 16
|
11 |
MAX_QUEUE_SIZE = 10
|
12 |
MAX_K_RETRIEVAL = 20
|
|
|
13 |
|
14 |
+
encoder = Uni3dEmbeddingEncoder()
|
15 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
16 |
source_id_list = torch.load("data/source_id_list.pt")
|
17 |
source_to_id = {source_id: i for i, source_id in enumerate(source_id_list)}
|
18 |
+
dataset = load_dataset("VAST-AI/LD-T3D", name=f"rendered_imgs_diag_above", split="base")
|
19 |
+
relation = load_dataset("VAST-AI/LD-T3D", split="full")
|
20 |
|
21 |
@functools.lru_cache()
|
22 |
def get_embedding(option, modality, angle=None):
|
feature_extractors/uni3d_embedding_encoder.py
CHANGED
@@ -279,23 +279,17 @@ def create_uni3d(uni3d_path):
|
|
279 |
return model
|
280 |
|
281 |
class Uni3dEmbeddingEncoder(FeatureExtractor):
|
282 |
-
def __init__(self, cache_dir, **kwargs) -> None:
|
283 |
bpe_path = "utils/bpe_simple_vocab_16e6.txt.gz"
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
# hf_hub_download("BAAI/Uni3D", "model.pt", subfolder="modelzoo/uni3d-g", cache_dir=cache_dir,
|
289 |
-
# local_dir=cache_dir + os.sep + "Uni3D")
|
290 |
-
if not os.path.exists(clip_path):
|
291 |
hf_hub_download("timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k", "open_clip_pytorch_model.bin",
|
292 |
-
|
293 |
|
294 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
295 |
self.tokenizer = SimpleTokenizer(bpe_path)
|
296 |
-
# self.model = create_uni3d(uni3d_path)
|
297 |
-
# self.model.eval()
|
298 |
-
# self.model.to(self.device)
|
299 |
self.clip_model, _, self.preprocess = open_clip.create_model_and_transforms(model_name="EVA02-E-14-plus", pretrained=clip_path)
|
300 |
self.clip_model.to(self.device)
|
301 |
|
|
|
279 |
return model
|
280 |
|
281 |
class Uni3dEmbeddingEncoder(FeatureExtractor):
|
282 |
+
def __init__(self, cache_dir=None, **kwargs) -> None:
|
283 |
bpe_path = "utils/bpe_simple_vocab_16e6.txt.gz"
|
284 |
+
if cache_dir is None:
|
285 |
+
clip_path = hf_hub_download("timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k", "open_clip_pytorch_model.bin")
|
286 |
+
else:
|
287 |
+
clip_path = os.path.join(cache_dir, "Uni3D", "open_clip_pytorch_model.bin")
|
|
|
|
|
|
|
288 |
hf_hub_download("timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k", "open_clip_pytorch_model.bin",
|
289 |
+
cache_dir=cache_dir, local_dir=cache_dir + os.sep + "Uni3D")
|
290 |
|
291 |
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
292 |
self.tokenizer = SimpleTokenizer(bpe_path)
|
|
|
|
|
|
|
293 |
self.clip_model, _, self.preprocess = open_clip.create_model_and_transforms(model_name="EVA02-E-14-plus", pretrained=clip_path)
|
294 |
self.clip_model.to(self.device)
|
295 |
|