import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


class ModelHandler:
    def __init__(self):
        # Use the GPU when available, otherwise fall back to CPU
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = AutoModelForSeq2SeqLM.from_pretrained("shaheerzk/text_to_sql")
        self.tokenizer = AutoTokenizer.from_pretrained("shaheerzk/text_to_sql")
        self.model.to(self.device)
        self.model.eval()

    def handle(self, inputs):
        # Preprocess: pull the input text and tokenize it for the model
        text = inputs.get("text", "")
        encoded = self.tokenizer(text, return_tensors="pt").to(self.device)

        # Inference: generate output tokens without tracking gradients
        with torch.no_grad():
            outputs = self.model.generate(**encoded)

        # Post-process: decode the generated token ids back into text
        generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": generated_text}