grantpitt committed
Commit 8e78d2d
1 Parent(s): 0c60338

add pipeline

Files changed (3)
  1. README.md +5 -2
  2. pipeline.py +24 -0
  3. requirements.txt +5 -0
README.md CHANGED
@@ -1,3 +1,6 @@
 ---
-license: other
----
+license: mit
+tags:
+- feature-extraction
+library_name: generic
+---
pipeline.py ADDED
@@ -0,0 +1,24 @@
+from typing import Dict, List, Any
+import numpy as np
+from transformers import CLIPTokenizer, CLIPModel
+
+
+class PreTrainedPipeline():
+    def __init__(self, path=""):
+        # Preload all the elements you are going to need at inference:
+        # for instance your model, processors, and tokenizer.
+        # This function is only called once, so do all the heavy processing and I/O here.
+        self.model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
+
+    def __call__(self, inputs: str) -> List[float]:
+        """
+        Args:
+            inputs (:obj:`str`):
+                a string to get the features from.
+        Return:
+            A :obj:`list` of floats: the features computed by the model.
+        """
+        token_inputs = self.tokenizer([inputs], padding=True, return_tensors="pt")
+        query_embed = self.model.get_text_features(**token_inputs)
+        return query_embed.detach().cpu().numpy()[0].tolist()
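
For local testing, a minimal usage sketch of the pipeline added above (an assumed invocation: the Hugging Face generic inference runtime normally instantiates the class itself, and the query string here is just an example):

from pipeline import PreTrainedPipeline

pipe = PreTrainedPipeline()

# Embed an example query string; the pipeline returns a plain list of floats.
features = pipe("a photo of a dog")

# CLIP ViT-B/32 text embeddings are 512-dimensional.
print(len(features))
print(features[:5])
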
requirements.txt ADDED
@@ -0,0 +1,5 @@
+numpy==1.23.1
+transformers==4.21.1
+torch==1.12.1
+torchvision==0.13.1
+-f https://download.pytorch.org/whl/cu116