from typing import Any, Dict, List

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""
class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load model and tokenizer from the repo path, quantized to 4-bit.
        # Recent transformers releases route 4-bit flags through
        # BitsAndBytesConfig rather than a bare load_in_4bit kwarg.
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            path, quantization_config=quantization_config, device_map="auto"
        )
        self.tokenizer = AutoTokenizer.from_pretrained(path)

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        # The original `data.pop(key, data)` pattern passes the whole payload
        # dict through when a key is missing; plain scalar defaults (assumed
        # values, not a documented contract) avoid that failure mode.
        sentence = data.pop("inputs", "").lower()
        instruction_prompt = data.pop("prompt", "")
        max_new_tokens = data.pop("max_new_tokens", 256)
        top_p = data.pop("top_p", 0.9)
        temperature = data.pop("temperature", 0.7)
        inputs = self.tokenizer(
            [
                alpaca_prompt.format(
                    instruction_prompt,  # instruction
                    sentence,            # input
                    "",                  # response left blank for generation
                )
            ],
            return_tensors="pt",
        ).to(self.model.device)  # send tensors wherever device_map placed the model
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,  # top_p/temperature only take effect when sampling
            top_p=top_p,
            temperature=temperature,
        )
        decoded = self.tokenizer.batch_decode(outputs)[0]
        # Keep only the completion: the text after the Response header, up to
        # the Llama-3-style end-of-text marker.
        response = decoded.split("### Response:")[1].split("<|end_of_text|>")[0]
        return [{"generated_text": response}]
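

# Minimal local smoke test, assuming this file sits inside the model repo and
# a CUDA GPU is available for the 4-bit weights. The payload values below are
# illustrative placeholders, not a documented endpoint contract.
if __name__ == "__main__":
    handler = EndpointHandler(path="./")
    payload = {
        "inputs": "The quarterly report shows revenue grew 12% year over year.",
        "prompt": "Summarize the following text in one sentence.",
        "max_new_tokens": 64,
        "top_p": 0.9,
        "temperature": 0.7,
    }
    print(handler(payload)[0]["generated_text"])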