"""Flask inference service for an iris-species SVM classifier.

Loads a scikit-learn SVM trained on the four numeric iris measurements
plus a JSON config describing the feature columns and the mapping from
class names to class ids, then serves predictions at POST /predict.

NOTE(review): the previous version wrapped the sklearn model in a
transformers text-classification pipeline and BERT-tokenized the numeric
features as prose. That path cannot work (pipeline() rejects sklearn
estimators, and the SVM expects a numeric matrix), so inference now calls
model.predict directly on the feature vector.
"""

from flask import Flask, jsonify, request
import joblib
import json

# Trained scikit-learn SVM; expects a 2-D array with one row per sample
# and one column per entry in config["features"].
model = joblib.load("iris_svm.joblib")

# Config supplies the ordered feature column names, the target column,
# and the class-name -> class-id mapping used at training time.
with open("config.json", "r") as f:
    config = json.load(f)

features = config["features"]
target = config["targets"][0]
target_mapping = config["target_mapping"]  # class name -> class id

# Invert the mapping once at startup (class id -> class name) instead of
# rebuilding two lists on every request. dict lookup also works when
# model.predict returns numpy integer scalars, since they hash like ints.
id_to_name = {class_id: name for name, class_id in target_mapping.items()}

app = Flask(__name__)


@app.route("/predict", methods=["POST"])
def predict():
    """Predict the iris species for one sample.

    Expects a JSON object whose keys include every feature name listed in
    config["features"] (e.g. SepalLengthCm, SepalWidthCm, PetalLengthCm,
    PetalWidthCm), each mapped to a numeric value.

    Returns:
        200 with {"predicted_class": <name>} on success.
        400 with {"error": <message>} when the body is not JSON, a
        feature is missing, or a value is not numeric.
    """
    input_data = request.get_json(silent=True)
    if not isinstance(input_data, dict):
        return jsonify({"error": "request body must be a JSON object"}), 400

    # Build the feature row in the exact column order the model was
    # trained on, validating presence and numeric type as we go.
    try:
        row = [float(input_data[name]) for name in features]
    except KeyError as exc:
        return jsonify({"error": f"missing feature: {exc.args[0]}"}), 400
    except (TypeError, ValueError):
        return jsonify({"error": "feature values must be numeric"}), 400

    # predict() takes a 2-D array and returns one label per row.
    predicted_class_id = model.predict([row])[0]

    # Map the class id back to its human-readable name; fall back to the
    # raw label in case the model already emits names (can't tell from
    # here which form iris_svm.joblib was trained with).
    predicted_class_name = id_to_name.get(predicted_class_id, str(predicted_class_id))

    return jsonify({"predicted_class": predicted_class_name})


if __name__ == "__main__":
    # debug=True is for local development only; disable in production.
    app.run(debug=True)