HalteroXHunter committed on
Commit
d576847
1 Parent(s): 14fb1c1
Files changed (1) hide show
  1. absa_evaluator.py +6 -23
absa_evaluator.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Dict, List
2
 
3
  import evaluate
4
  from datasets import Features, Sequence, Value
@@ -172,14 +172,15 @@ class AbsaEvaluator(evaluate.Metric):
172
  }
173
 
174
  def adjust_predictions(
175
- refs: List[List[Any]], preds: List[List[Any]], choices: List[Any]
176
  ) -> List[List[Any]]:
177
  """Adjust predictions to match the length of references with either a special token or random choice."""
 
178
  adjusted_preds = []
179
  for ref, pred in zip(refs, preds):
180
  if len(pred) < len(ref):
181
  missing_count = len(ref) - len(pred)
182
- pred.extend([choice(choices) for _ in range(missing_count)])
183
  elif len(pred) > len(ref):
184
  pred = pred[:len(ref)]
185
  adjusted_preds.append(pred)
@@ -221,7 +222,7 @@ def absa_term_preprocess(
221
 
222
  # Define adjustment parameters
223
  special_token = "NONE" # For missing aspect terms
224
- sentiment_choices = unique_strings(flatten_list(truth_polarities))
225
 
226
  # Adjust the predictions to match the length of references
227
  adjusted_pred_terms = adjust_predictions(
@@ -241,22 +242,4 @@ def absa_term_preprocess(
241
 
242
  def flatten_list(nested_list):
243
  """Flatten a nested list into a single-level list."""
244
- return list(chain.from_iterable(nested_list))
245
-
246
- def unique_strings(strings: List[str]) -> List[str]:
247
- """
248
- Convert a list of strings to a list of unique strings, preserving the original order.
249
-
250
- Args:
251
- strings (List[str]): The input list of strings.
252
-
253
- Returns:
254
- List[str]: A list of unique strings in the order of their first occurrence.
255
- """
256
- seen = set()
257
- unique_list = []
258
- for string in strings:
259
- if string not in seen:
260
- seen.add(string)
261
- unique_list.append(string)
262
- return unique_list
 
1
+ from typing import Dict, List, Set
2
 
3
  import evaluate
4
  from datasets import Features, Sequence, Value
 
172
  }
173
 
174
  def adjust_predictions(
175
+ refs: List[List[Any]], preds: List[List[Any]], choices: Set[Any]
176
  ) -> List[List[Any]]:
177
  """Adjust predictions to match the length of references with either a special token or random choice."""
178
+ choices_list = list(choices)
179
  adjusted_preds = []
180
  for ref, pred in zip(refs, preds):
181
  if len(pred) < len(ref):
182
  missing_count = len(ref) - len(pred)
183
+ pred.extend([choice(choices_list) for _ in range(missing_count)])
184
  elif len(pred) > len(ref):
185
  pred = pred[:len(ref)]
186
  adjusted_preds.append(pred)
 
222
 
223
  # Define adjustment parameters
224
  special_token = "NONE" # For missing aspect terms
225
+ sentiment_choices = set(flatten_list(truth_polarities))
226
 
227
  # Adjust the predictions to match the length of references
228
  adjusted_pred_terms = adjust_predictions(
 
242
 
243
def flatten_list(nested_list):
    """Collapse one level of nesting, returning all inner items as one flat list."""
    flat = []
    for inner in nested_list:
        flat.extend(inner)
    return flat