# artemis-analysis / evaluation.py
import pandas as pd

from sklearn.metrics import accuracy_score, classification_report, f1_score

def evaluate_predictions(filename):
    """Score predicted sentiment labels against the reference labels in a CSV file."""
    data = pd.read_csv(filename)

    # Normalise the raw predictions in 'sentiment_score' so their label names
    # match those used in the reference 'summary' column; any value not in the
    # mapping becomes NaN and will count as a mismatch in the metrics.
    sentiment_mapping = {
        "Neutral": "Neutral",
        "Positive sentiment": "Positive",
        "Negative sentiment": "Negative",
    }
    data['sentiment_score_mapped'] = data['sentiment_score'].map(sentiment_mapping)

    # Overall accuracy, class-frequency-weighted F1, and a per-class breakdown.
    accuracy = accuracy_score(data['summary'], data['sentiment_score_mapped'])
    f1 = f1_score(data['summary'], data['sentiment_score_mapped'], average='weighted')
    class_report = classification_report(data['summary'], data['sentiment_score_mapped'])

    print(f"Accuracy: {accuracy}")
    print(f"F1 Score: {f1}")
    print("Classification Report:\n", class_report)
# Call the function with the path to your CSV file
# evaluate_predictions('predictions.csv')
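
# A minimal command-line entry point (a sketch; falling back to
# 'predictions.csv' when no argument is given is an assumption, based only
# on the example call above).
if __name__ == "__main__":
    import sys

    evaluate_predictions(sys.argv[1] if len(sys.argv) > 1 else 'predictions.csv')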