HalteroXHunter committed
Commit 56bd5b5
1 Parent(s): 053b2da

add new module

Files changed (1)
  1. absa_evaluator.py +166 -0
absa_evaluator.py ADDED
@@ -0,0 +1,166 @@
+ from typing import Dict, List
+
+ import evaluate
+ from datasets import Features, Sequence, Value
+ from sklearn.metrics import accuracy_score
+
+ from research_eval.utils.preprocessing import absa_term_preprocess
+
+ _CITATION = """
+ """
+
+ _DESCRIPTION = """
+ Evaluation metrics for Aspect-Based Sentiment Analysis (ABSA): precision, recall, and F1 score for aspect term extraction and aspect category detection, plus accuracy for the corresponding polarities.
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Computes precision, recall, and F1 score for aspect term extraction and aspect category detection, plus polarity accuracy, in Aspect-Based Sentiment Analysis (ABSA).
+
+ Args:
+     predictions: List of ABSA predictions with the following structure:
+         - 'aspects': Aspect term annotations with the following keys:
+             - 'term': List of aspect terms
+             - 'polarity': List of polarities, one per term
+         - 'category': Aspect category annotations with the following keys:
+             - 'category': List of aspect categories
+             - 'polarity': List of polarities, one per category
+     references: List of ABSA references with the same structure as predictions.
+ Returns:
+     term_extraction_results: Precision, recall, F1 score, and counts for aspect term extraction
+     term_polarity_results_accuracy: Accuracy score for aspect term polarities
+     category_detection_results: Precision, recall, F1 score, and counts for aspect category detection
+     category_polarity_results_accuracy: Accuracy score for aspect category polarities
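+
+ Examples:
+     Illustrative usage sketch only; it assumes this file is saved locally as
+     absa_evaluator.py (so evaluate.load can find it) and that the
+     research_eval preprocessing dependency is installed. The record shown is
+     an assumption about the expected shape, not output from an actual run.
+
+     >>> metric = evaluate.load("absa_evaluator.py")
+     >>> predictions = [{
+     ...     "aspects": {"term": ["battery"], "polarity": ["positive"]},
+     ...     "category": {"category": ["power"], "polarity": ["positive"]},
+     ... }]
+     >>> references = [{
+     ...     "aspects": {"term": ["battery", "screen"], "polarity": ["positive", "negative"]},
+     ...     "category": {"category": ["power"], "polarity": ["positive"]},
+     ... }]
+     >>> results = metric.compute(predictions=predictions, references=references)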
+ """
+
+
+ class AbsaEvaluatorTest(evaluate.Metric):
+     def _info(self):
+         return evaluate.MetricInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=Features(
+                 {
+                     "predictions": Features(
+                         {
+                             "aspects": Features(
+                                 {
+                                     "term": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                             "category": Features(
+                                 {
+                                     "category": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                         }
+                     ),
+                     "references": Features(
+                         {
+                             "aspects": Features(
+                                 {
+                                     "term": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                             "category": Features(
+                                 {
+                                     "category": Sequence(Value("string")),
+                                     "polarity": Sequence(Value("string")),
+                                 }
+                             ),
+                         }
+                     ),
+                 }
+             ),
+         )
+
+     def _compute(self, predictions, references):
+         # Align gold and predicted aspect terms and their polarities.
+         (
+             truth_aspect_terms,
+             pred_aspect_terms,
+             truth_term_polarities,
+             pred_term_polarities,
+         ) = absa_term_preprocess(
+             references=references,
+             predictions=predictions,
+             subtask_key="aspects",
+             subtask_value="term",
+         )
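+         # Assumption, not verified in this commit: absa_term_preprocess is
+         # expected to return aligned per-sample lists of gold/predicted
+         # terms plus flat polarity label lists, which is how the values
+         # are consumed below.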
+         # Precision/recall/F1 for term extraction, accuracy for term polarity.
+         term_results = self.semeval_metric(
+             truth_aspect_terms, pred_aspect_terms
+         )
+         term_polarity_acc = accuracy_score(
+             truth_term_polarities, pred_term_polarities
+         )
+
+         # Align gold and predicted aspect categories and their polarities.
+         (
+             truth_categories,
+             pred_categories,
+             truth_cat_polarities,
+             pred_cat_polarities,
+         ) = absa_term_preprocess(
+             references=references,
+             predictions=predictions,
+             subtask_key="category",
+             subtask_value="category",
+         )
+
+         # Precision/recall/F1 for category detection, accuracy for category polarity.
+         category_results = self.semeval_metric(
+             truth_categories, pred_categories
+         )
+         cat_polarity_acc = accuracy_score(
+             truth_cat_polarities, pred_cat_polarities
+         )
+
+         return {
+             "term_extraction_results": term_results,
+             "term_polarity_results_accuracy": term_polarity_acc,
+             "category_detection_results": category_results,
+             "category_polarity_results_accuracy": cat_polarity_acc,
+         }
+
+     def semeval_metric(
+         self, truths: List[List[str]], preds: List[List[str]]
+     ) -> Dict[str, float]:
+         """
+         Implements evaluation for extraction tasks using precision, recall, and F1 score.
+
+         Parameters:
+         - truths: List of lists, where each inner list contains the ground truth labels for a sample.
+         - preds: List of lists, where each inner list contains the predicted labels for a sample.
+
+         Returns:
+         - A dictionary containing the precision, recall, F1 score, and the counts of common, retrieved, and relevant labels.
+
+         Adapted from: https://github.com/davidsbatista/Aspect-Based-Sentiment-Analysis/blob/1d9c8ec1131993d924e96676fa212db6b53cb870/libraries/baselines.py#L387
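+
+         Worked example (illustrative): truths=[["battery"]] and
+         preds=[["battery", "screen"]] give common=1, retrieved=2,
+         relevant=1, so precision=0.5, recall=1.0, and f1=2/3.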
+ """
+         # beta = 1 reduces the F-beta score to the standard F1
+         # (the harmonic mean of precision and recall).
+         b = 1
+         common, relevant, retrieved = 0.0, 0.0, 0.0
+         for truth, pred in zip(truths, preds):
+             common += len([a for a in pred if a in truth])
+             retrieved += len(pred)
+             relevant += len(truth)
+         precision = common / retrieved if retrieved > 0 else 0.0
+         recall = common / relevant if relevant > 0 else 0.0
+         # F_beta = (1 + b^2) * P * R / (b^2 * P + R)
+         f1 = (
+             (1 + (b**2))
+             * precision
+             * recall
+             / ((precision * b**2) + recall)
+             if precision > 0 and recall > 0
+             else 0.0
+         )
+         return {
+             "precision": precision,
+             "recall": recall,
+             "f1_score": f1,
+             "common": common,
+             "retrieved": retrieved,
+             "relevant": relevant,
+         }
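+
+
+ if __name__ == "__main__":
+     # Minimal smoke test of the extraction metric alone; a sketch added
+     # for illustration, not part of the upstream interface. It calls
+     # semeval_metric directly, so the research_eval preprocessing
+     # dependency is not needed for this check.
+     evaluator = AbsaEvaluatorTest()
+     demo = evaluator.semeval_metric(
+         truths=[["battery", "screen"]],
+         preds=[["battery"]],
+     )
+     print(demo)  # expected: precision 1.0, recall 0.5, f1 ~= 0.667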