katerinavr committed
Commit 92aacef · 1 Parent(s): 2008ed3

Add requirements

Files changed (1)
app.py +6 -4
app.py CHANGED
@@ -7,13 +7,15 @@ from PIL import Image
 import os
 from datasets import load_dataset
 from huggingface_hub.hf_api import HfFolder
+from sentence_transformers import SentenceTransformer, util
 HfFolder.save_token('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')
 
 ## Define model
-model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
-processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
-tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
-
+# model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+# tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
+from sentence_transformers import SentenceTransformer, util
+model = SentenceTransformer('clip-ViT-B-32')
 #Open the precomputed embeddings
 #emb_filename = 'unsplash-25k-photos-embeddings.pkl'
 ds_with_embeddings = load_dataset("kvriza8/image-embeddings", use_auth_token=True)
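
For context on what this change enables, here is a minimal sketch of querying the precomputed image embeddings with the new sentence-transformers CLIP model. The 'train' split and 'embeddings' column names are assumptions about the kvriza8/image-embeddings dataset schema, and the query string is only a placeholder.

import numpy as np
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('clip-ViT-B-32')
ds_with_embeddings = load_dataset("kvriza8/image-embeddings", use_auth_token=True)

# Stack the stored per-image vectors into one array (column name assumed).
image_embeddings = np.array(ds_with_embeddings['train']['embeddings'])

# Encode a text query with the same CLIP model and rank images by cosine similarity.
query_embedding = model.encode("an example text query", convert_to_tensor=True)
hits = util.semantic_search(query_embedding, image_embeddings, top_k=5)[0]
for hit in hits:
    print(hit['corpus_id'], round(hit['score'], 3))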