# Iris classification API: a Flask service that serves predictions
# from a pre-trained scikit-learn SVM ("iris_svm.joblib").
from flask import Flask, jsonify, request
from transformers import pipeline, AutoTokenizer
import joblib
import json
# Load the trained scikit-learn SVM model from disk.
model = joblib.load("iris_svm.joblib")

# Load the configuration describing feature columns and target labels.
with open("config.json", "r") as f:
    config = json.load(f)

# Feature names (in model input order), the target column name, and the
# class-name -> class-id mapping used to decode predictions.
features = config["features"]
target = config["targets"][0]
target_mapping = config["target_mapping"]

# NOTE(review): a BERT tokenizer cannot feed a scikit-learn SVM; this load
# is kept only because the original code referenced it — confirm the
# transformers dependency can be removed entirely.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Initialize the Flask application.
app = Flask(__name__)
# Define the prediction route.
@app.route("/predict", methods=["POST"])
def predict():
    """Predict the iris species for a single flower measurement.

    Expects a JSON body containing every feature name listed in the
    configured ``features`` (e.g. SepalLengthCm, SepalWidthCm,
    PetalLengthCm, PetalWidthCm). Returns ``{"predicted_class": <name>}``
    on success, or ``{"error": ...}`` with HTTP 400 on bad input.
    """
    input_data = request.get_json(silent=True)
    if input_data is None:
        return jsonify({"error": "request body must be valid JSON"}), 400

    # Assemble the numeric feature vector in the order declared in config,
    # rather than hard-coding the four iris column names.
    try:
        row = [float(input_data[name]) for name in features]
    except (KeyError, TypeError, ValueError) as err:
        return jsonify({"error": f"bad or missing feature: {err}"}), 400

    # The original code routed the sklearn model through a transformers
    # text-classification pipeline, which cannot work; an SVM predicts
    # directly on a 2-D numeric array (one row per sample).
    predicted_class_id = model.predict([row])[0]

    # Invert target_mapping (class name -> id) to recover the class name.
    # NOTE(review): assumes mapping values match model.predict output —
    # confirm against the training script.
    predicted_class_name = next(
        name
        for name, class_id in target_mapping.items()
        if class_id == predicted_class_id
    )
    # Return the predicted class name as a JSON response.
    return jsonify({"predicted_class": predicted_class_name})
# Start the Flask development server when this module is executed directly.
if __name__ == "__main__":
    app.run(debug=True)