from typing import Any, Dict, List

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# Alpaca-style prompt template; the response slot is left empty and is
# filled in by the model at generation time.
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}"""


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Load the model in 4-bit precision to cut memory use, then its tokenizer.
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            quantization_config=BitsAndBytesConfig(load_in_4bit=True),
            device_map="auto",  # place the quantized weights on the available device(s)
        )
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        # The zero-shot chain-of-thought instruction is stored next to the weights.
        with open(f"{path}/zero_shot_cot_prompt.txt", "r") as file:
            self.instruction_prompt = file.read()

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
        sentence = data.pop("inputs", data)
        inputs = self.tokenizer(
            [
                alpaca_prompt.format(
                    self.instruction_prompt,  # instruction
                    sentence,                 # input
                    "",                       # response - left blank for generation
                )
            ],
            return_tensors="pt",
        ).to(self.model.device)  # move the input ids onto the model's device
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=1048,
            use_cache=True,
            do_sample=True,  # required for top_p/temperature below to take effect
            top_p=0.1,
            temperature=0.001,
        )
        decoded = self.tokenizer.batch_decode(outputs)[0]
        # Keep only the text between the response header and the end-of-text token.
        response = decoded.split("### Response:")[1].split("<|end_of_text|>")[0]
        return [{"generated_text": response}]
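

# Minimal local smoke test: a sketch only, not part of the Inference Endpoints
# contract (the path "." and the sample sentence are assumptions). In production,
# Endpoints instantiates EndpointHandler with the repository path and calls it
# with the deserialized request payload.
if __name__ == "__main__":
    handler = EndpointHandler(path=".")
    result = handler({"inputs": "The quick brown fox jumps over the lazy dog."})
    print(result[0]["generated_text"])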