from transformers import AutoTokenizer, AutoModelForSequenceClassification import torch class EndpointHandler: def __init__(self, model_dir): self.tokenizer = AutoTokenizer.from_pretrained(model_dir) self.model = AutoModelForSequenceClassification.from_pretrained(model_dir) self.label_mapping = {0: "SAFE", 1: "JAILBREAK", 2: "INJECTION", 3: "PHISHING"} def __call__(self, inputs): if isinstance(inputs, dict) and 'inputs' in inputs: return self.predict(inputs['inputs']) return self.predict(inputs) def predict(self, inputs): # Tokenize the input encoded_input = self.tokenizer(inputs, return_tensors='pt', truncation=True, padding=True) # Make prediction with torch.no_grad(): output = self.model(**encoded_input) # Get the predicted class predicted_class = torch.argmax(output.logits, dim=1).item() # Map the predicted class to its label predicted_label = self.label_mapping[predicted_class] return {"label": predicted_label, "score": output.logits.softmax(dim=1).max().item()} def get_pipeline(): return EndpointHandler