krilecy committed
Commit
8753b76
1 Parent(s): 92ec9d6

Upload 2 files

Files changed (2)
  1. handler.py +99 -0
  2. requirements.txt +2 -0
handler.py ADDED
@@ -0,0 +1,99 @@
+ # handler file for the Hugging Face Inference API (custom EndpointHandler)
+
+ from typing import Dict, Any
+
+ from peft import PeftModel
+ from transformers import AutoTokenizer, AutoModel, BitsAndBytesConfig
+ from transformers.models.mistral.modeling_mistral import MistralAttention
+ from ExtractableMistralAttention import forward
+
+ # Monkey-patch Mistral's attention forward with a version that stores the
+ # attention vector on the module so it can be read back after a forward pass.
+ MistralAttention.forward = forward
+
+ import torch
+ from torch import Tensor
+ import torch.nn.functional as F
+
+
+ class EndpointHandler():
+     def __init__(self, path=""):
+         # `path` is the repository directory passed to the handler by the endpoint.
+         self.instruction = 'Given a web search query, retrieve relevant passages that answer the query:\n'
+         self.max_length = 4096
+         self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+         self.tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-mistral-7b-instruct', trust_remote_code=True)
+         self.tokenizer.pad_token = '[PAD]'
+         self.tokenizer.padding_side = 'left'
+
+         # Load the base model with 8-bit quantization so it fits on a single GPU.
+         bnb_config = BitsAndBytesConfig(load_in_8bit=True, bnb_8bit_compute_dtype=torch.float16)
+
+         self.model = AutoModel.from_pretrained(
+             path,
+             quantization_config=bnb_config,
+             device_map="auto",
+             trust_remote_code=True,
+             attn_implementation="eager",
+         )
+         # Attach the LoRA adapter stored in the repository's lora/ folder.
+         self.model = PeftModel.from_pretrained(self.model, path + '/lora')
+         self.model.eval()
+
+
+     def last_token_pool(self, last_hidden_states: Tensor,
+                         attention_mask: Tensor) -> Tensor:
+         # Pool the hidden state of the last real (non-padding) token of each sequence.
+         left_padding = (attention_mask[:, -1].sum() == attention_mask.shape[0])
+         if left_padding:
+             return last_hidden_states[:, -1]
+         else:
+             sequence_lengths = attention_mask.sum(dim=1) - 1
+             batch_size = last_hidden_states.shape[0]
+             return last_hidden_states[torch.arange(batch_size, device=last_hidden_states.device), sequence_lengths]
+
+
+     def tokenize(self, text, type):
+         # Queries get the retrieval instruction prepended; documents are embedded as-is.
+         if type == 'query':
+             text = self.instruction + text
+         return self.tokenizer(text + self.tokenizer.eos_token, max_length=self.max_length, truncation=True, return_tensors='pt').to(self.device)
+
+     def extract_attn_vec(self, model):
+         # Read the attention vector captured by the patched forward of the last decoder layer.
+         return model._modules['layers'][-1].self_attn.attn_vec
+
+
+     def embed(self, text, type):
+         tokens = self.tokenize(text, type)
+         with torch.no_grad():
+             # Sentence embedding: last-token pooling over the final hidden states, then L2 normalization.
+             output = self.model(tokens['input_ids'], tokens['attention_mask']).last_hidden_state.detach()
+             embedding = self.last_token_pool(output, tokens['attention_mask'])
+             embedding = F.normalize(embedding, p=2, dim=1)
+
+             # Attention vector captured by the patched attention layer, pooled and normalized the same way.
+             attn_vec = self.extract_attn_vec(self.model)
+             attn_vec = self.last_token_pool(attn_vec, tokens['attention_mask'])
+             attn_vec = F.normalize(attn_vec, p=2, dim=1)
+             del output, tokens
+             torch.cuda.empty_cache()
+             return embedding, attn_vec
+
+
+     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+         """
+         data args:
+             id (:obj:`str`): identifier returned unchanged in the response
+             text (:obj:`str`): the query or document to embed
+             type (:obj:`str`): either 'query' or 'document'
+         Return:
+             A :obj:`dict` with the embedding (and, for queries, the attention vector); will be serialized and returned
+         """
+         id = data.pop("id", data)
+         text = data.pop("text", data)
+         type = data.pop("type", data)
+
+         embeddings, attn_vec = self.embed(text, type)
+         embeddings = embeddings[0].tolist()
+         attn_vec = attn_vec[0].tolist()
+
+         if type == 'query':
+             return {"id": id, "embedding": embeddings, "attention_vec": attn_vec}
+
+         elif type == 'document':
+             return {"id": id, "embedding": embeddings}
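For reference, a minimal sketch of how this handler could be exercised locally once the repository (base model weights plus a lora/ adapter folder) is checked out. The repository path "." and the example ids and texts are assumptions, not part of the commit; the payload keys id, text and type mirror what __call__ pops from the request.

# Hypothetical local smoke test for the handler (not part of this commit).
# Assumes handler.py is importable and the model + LoRA weights live in the
# current directory, which is passed to the handler as `path`.
from handler import EndpointHandler

handler = EndpointHandler(path=".")

# Queries get the retrieval instruction prepended and also return the attention vector.
query_out = handler({"id": "q1", "text": "How does last-token pooling work?", "type": "query"})
print(len(query_out["embedding"]), len(query_out["attention_vec"]))

# Documents are embedded as-is and only the embedding is returned.
doc_out = handler({"id": "d1", "text": "Last-token pooling keeps the hidden state of the final non-padding token.", "type": "document"})
print(len(doc_out["embedding"]))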
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ peft
+ torch