tb2pi-persistent committed on
Commit
f14d9b5
1 Parent(s): 189ff4e

Upload handler.py

Files changed (1)
  handler.py  +40 -0
handler.py ADDED
@@ -0,0 +1,40 @@
+from typing import Dict, List, Any
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import transformers
+import torch
+
+# dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] == 8 else torch.float16
+
+class EndpointHandler:
+    def __init__(self, path=""):
+
+        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+        tokenizer.pad_token = tokenizer.eos_token
+        model = AutoModelForCausalLM.from_pretrained(
+            path,
+            return_dict=True,
+            device_map="auto",
+            load_in_8bit=True,
+            torch_dtype=torch.bfloat16,
+            trust_remote_code=True
+        )
+
+        generation_config = model.generation_config
+        generation_config.max_new_tokens = 200
+        generation_config.temperature = 0.7
+        generation_config.top_p = 0.7
+        generation_config.num_return_sequences = 1
+        generation_config.pad_token_id = tokenizer.eos_token_id
+        generation_config.eos_token_id = tokenizer.eos_token_id
+        self.generation_config = generation_config
+
+        self.pipeline = transformers.pipeline(
+            "text-generation",
+            model=model,
+            tokenizer=tokenizer
+        )
+
+    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+        prompt = data.pop("inputs", data)
+        result = self.pipeline(prompt, generation_config=self.generation_config)
+        return result
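
For reference, a handler like this is usually smoke-tested locally before deployment. The sketch below is only an illustrative assumption, not part of the commit: the model path "./model" is a placeholder, and 8-bit loading requires bitsandbytes plus a CUDA-capable GPU. It shows how an Inference Endpoints-style payload maps onto EndpointHandler.__call__.

# Hypothetical local test of the handler above; "./model" is a placeholder path.
from handler import EndpointHandler

handler = EndpointHandler(path="./model")

# The endpoint passes a JSON payload; the handler reads the prompt from "inputs".
payload = {"inputs": "Explain what this endpoint does."}
result = handler(payload)

# The text-generation pipeline returns a list of {"generated_text": ...} dicts,
# which is looser than the handler's Dict return annotation suggests.
print(result[0]["generated_text"])

Note that temperature and top_p only take effect when sampling is enabled; if the checkpoint's default generation_config has do_sample=False, generation is greedy and those two settings are ignored unless generation_config.do_sample = True is also set.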