from transformers import pipeline

# Function to load the model
def load_model(model_name):
    try:
        # Load the model from the Hugging Face Hub or local storage (by name)
        model = pipeline("text-classification", model=model_name)
        return model
    except Exception as e:
        print(f"Error loading model: {e}")
        return None

# Function to run inference using the selected model
def run_inference(user_input, selected_model, prompt=None):
    model = load_model(selected_model)
    if model:
        # If a prompt is provided, prepend it to the input text
        if prompt:
            input_text = f"{prompt}\n{user_input}"
        else:
            input_text = user_input
        try:
            # Run inference and check the model output
            result = model(input_text)
            # Assuming the output format is a list of dicts with a 'label' field
            return result[0]['label'] if 'label' in result[0] else "Error: No label in output"
        except Exception as e:
            return f"Error during inference: {e}"
    else:
        return f"Error: Model '{selected_model}' failed to load."

# Example usage
selected_model = "Canstralian/CySec_Known_Exploit_Analyzer"
user_input = "Sample exploit description"
prompt = "Classify the following cybersecurity exploit:"

# Run inference
result = run_inference(user_input, selected_model, prompt)
print(f"Inference Result: {result}")
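
# Note: run_inference above reloads the pipeline on every call, which is slow
# for repeated inference. A minimal sketch of one way to avoid that, assuming
# repeated calls with the same model name: functools.lru_cache (standard
# library) memoizes the loader so each pipeline is built only once per model
# name. load_model_cached is a hypothetical helper introduced here for
# illustration; the maxsize of 2 is an arbitrary choice. Failed loads raise
# instead of returning None, so errors are not cached.
from functools import lru_cache

@lru_cache(maxsize=2)
def load_model_cached(model_name):
    # Same loading logic as load_model, but memoized by model name
    return pipeline("text-classification", model=model_name)

# Example: both calls reuse the same cached pipeline instance
# clf = load_model_cached(selected_model)
# clf = load_model_cached(selected_model)  # cache hit, no reload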