import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the phishing-detection model and tokenizer once at startup.
model_name = "AntiSpamInstitute/bert-MoE-Phishing-detection-v2.4"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)


@spaces.GPU
def predict_phishing(text):
    # Move the model to the GPU inside the @spaces.GPU-decorated function,
    # since ZeroGPU only allocates a GPU for the duration of this call.
    model.to("cuda")
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=512,
        padding=True,
    )
    inputs = {k: v.to("cuda") for k, v in inputs.items()}

    # Run inference and convert logits to class probabilities.
    with torch.no_grad():
        outputs = model(**inputs)
    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
    prediction = torch.argmax(probabilities, dim=-1)

    # Label index 1 corresponds to the phishing class.
    return "Phishing" if prediction.item() == 1 else "Benign"


demo = gr.Interface(
    fn=predict_phishing,
    inputs=gr.Textbox(label="Email Content", lines=8),
    outputs=gr.Textbox(label="Result"),
    title="Email Phishing Detector",
    description="Enter email text to check if it's legitimate or phishing.",
    examples=[
        ["Dear Customer, We've detected unusual activity on your account. Click here to verify: http://amaz0n-security.net/verify"],
        ["Hi John, Please review the Q4 sales report I've attached. Let me know if you need any clarification. Best regards, Sarah"],
        ["URGENT: Your magnificent account needs immediate attention! Click here to verify: http://suspicious-link.com"],
    ],
)

if __name__ == "__main__":
    demo.queue().launch()