Commit aed783f · Elron committed · verified · 1 Parent(s): c096080

Upload folder using huggingface_hub

Files changed (3):
  1. image_operators.py +5 -0
  2. metrics.py +8 -54
  3. version.py +1 -1
image_operators.py CHANGED
@@ -73,3 +73,8 @@ class GrayScale(ImageFieldOperator):
 
         # Convert back to a PIL image with 3 channels
         return self.image.fromarray(grayscale_array)
+
+
+class ToRGB(ImageFieldOperator):
+    def process_image(self, image):
+        return image.convert("RGB")
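
The new ToRGB operator is a thin wrapper around Pillow's standard mode conversion. A minimal standalone sketch of the same call, assuming Pillow is installed (the file path is hypothetical):

from PIL import Image

# Open an image that may be grayscale ("L"), paletted ("P"), or carry alpha ("RGBA")
image = Image.open("example.png")  # hypothetical path

# convert("RGB") normalizes any of these modes to 3-channel RGB;
# it is a no-op for images that are already RGB
rgb_image = image.convert("RGB")
assert rgb_image.mode == "RGB"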
metrics.py CHANGED
@@ -135,8 +135,7 @@ class Metric(Artifact):
     def _add_score_prefix(self, score_name):
         return (
             self.score_prefix + score_name
-            if score_name not in ["score", "score_name"]
-            and not score_name.startswith("num_of_instances")
+            if score_name not in ["score", "score_name", "num_of_instances"]
             else score_name
         )
 
@@ -145,17 +144,12 @@
     ) -> Dict[str, Any]:
         new_scores = {}
         for score_name, score in scores.items():
-            if isinstance(score, dict):
-                new_scores[score_name] = score
-                continue  # do not prefix group names
             score_with_prefix = self._add_score_prefix(score_name)
             new_scores[score_with_prefix] = (
                 score if score_name not in ["score_name"] else self.score_prefix + score
             )
         for new_score_name in new_scores:
-            if new_score_name in ["score", "score_name"] or new_score_name.startswith(
-                "num_of_instances"
-            ):
+            if new_score_name in ["score", "score_name", "num_of_instances"]:
                 continue
             if new_score_name in existing_scores:
                 UnitxtWarning(
@@ -288,7 +282,8 @@ class Metric(Artifact):
                 "score_name",
                 "score_ci_low",
                 "score_ci_high",
-            ] or score_name.startswith("num_of_instances"):
+                "num_of_instances",
+            ]:
                 continue
             if score_name in instance["score"]["global"]:
                 UnitxtWarning(
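
The three hunks above replace the `startswith("num_of_instances")` checks with an exact reserved key, so only `score`, `score_name`, and `num_of_instances` escape prefixing. A minimal sketch of the resulting behavior outside the Metric class (names and values are illustrative):

RESERVED = ["score", "score_name", "num_of_instances"]

def add_prefix(scores: dict, prefix: str) -> dict:
    # Reserved bookkeeping keys keep their names; all other keys get the prefix.
    # The value under "score_name" is prefixed too, so it still names the main score.
    return {
        (k if k in RESERVED else prefix + k): (prefix + v if k == "score_name" else v)
        for k, v in scores.items()
    }

print(add_prefix({"f1": 0.8, "score": 0.8, "score_name": "f1", "num_of_instances": 100}, "my_"))
# {'my_f1': 0.8, 'score': 0.8, 'score_name': 'my_f1', 'num_of_instances': 100}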
@@ -1116,7 +1111,6 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
                 instances,
                 reduction_params,
                 reduction_fields,
-                global_score,
             )
         else:
             raise ValueError(
@@ -1211,8 +1205,6 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
         score_names: List[str],
         group_aggregation_func,
         prepend_score_prefix: bool,
-        global_score: dict,
-        aggregation_function_name: str,
     ):
         """Group scores by the group_id and subgroup_type fields of each instance, and compute group_aggregation_func by group.
 
@@ -1224,8 +1216,6 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
                 callable function returns a single score for the group
             prepend_score_prefix: if True - prepend the score_prefix to the score names in the returned dicts. Set to False
                 if down the stream such a prepending is expected.
-            global_score: the being built up global score. It will be filled here with number of instances per each group, and group scores.
-            aggregation_function_name: used to annotate the groups' global scores.
 
         Returns:
             List of dicts, each corresponding to a group of instances (defined by 'group_id'),
@@ -1245,11 +1235,11 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
         # loop through the instances and group the scores
         for instance in instances:
             task_data = instance["task_data"]
-            group_key = task_data["group_id"]
+            group_key = str(task_data["group_id"])
             # for functions that do comparisons between subgroup_column groups
             # if function doesn't use subgroup_column, or none is present, set "default" as default value, and pass all scores
             subgroup_type = (
-                task_data[self.subgroup_column]
+                str(task_data[self.subgroup_column])
                 if uses_subgroups
                 else default_subgroup_name
             )
@@ -1260,27 +1250,8 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
                 ]
             )
 
-        # count the instances in each group and subgroup.
-        # Each instance goes into group_to_instances per each score_name.
-        # So we count over the first score_name only
-        for group_key in group_to_instance_scores:
-            if group_key not in global_score:
-                global_score[group_key] = {}
-            global_score[group_key]["num_of_instances"] = sum(
-                [
-                    len(
-                        group_to_instance_scores[group_key][score_names[0]][
-                            subgroup_type
-                        ]
-                    )
-                    for subgroup_type in group_to_instance_scores[group_key][
-                        score_names[0]
-                    ]
-                ]
-            )
-
         # if group_aggregation_func expects a subgroup-types score dict, pass it; otherwise pass the default type list of scores
-        to_return = [
+        return [
             {
                 "score": {
                     "instance": {
@@ -1301,25 +1272,12 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
             )  # sorted for consistency
         ]
 
-        # update each group section in global_score
-        for i, group_name in enumerate(sorted(group_to_instance_scores.keys())):
-            global_score[group_name].update(
-                {
-                    aggregation_function_name + "_" + k: v
-                    for k, v in to_return[i]["score"]["instance"].items()
-                }
-            )
-
-        return to_return
-
     def _set_up_group_mean_aggregation(
         self,
         instances,
         reduction_params,
         reduction_fields,
-        global_score,
     ):
-        aggregation_function_name = str(reduction_params["agg_func"][0])
         group_aggregation_func = reduction_params["agg_func"][1]
         # if treat groups as units
         do_resample_as_group = reduction_params["agg_func"][2]
@@ -1331,8 +1289,6 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
                 score_names=reduction_fields,
                 group_aggregation_func=group_aggregation_func,
                 prepend_score_prefix=True,
-                global_score=global_score,
-                aggregation_function_name=aggregation_function_name,
             )
         else:
             # pass the instance scores to resample, and calculate the group aggregation on the resamplings
@@ -1348,8 +1304,6 @@ class InstanceMetric(StreamOperator, MetricWithConfidenceInterval):
                 score_names=[field_name],
                 group_aggregation_func=group_aggregation_func,
                 prepend_score_prefix=False,
-                global_score=global_score,
-                aggregation_function_name=aggregation_function_name,
             )
             return nan_mean(
                 [group["score"]["instance"][field_name] for group in group_scores]
@@ -3050,7 +3004,7 @@ class SafetyMetric(GlobalMetric):
     # instead of using the 'task_data' parameters, so prediction
     # type and reference type are different
     prediction_type = Any
-    batch_size: int = 100
+    batch_size: int = 10
     critical_threshold: int = -5  # _CRITICAL_THRESHOLD = -5
     high_threshold: int = -4  # _HIGH_THRESHOLD = -4
     medium_threshold: int = -3  # _MEDIUM_THRESHOLD = -3
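
With the `global_score` bookkeeping removed, the group reduction simply returns one score dict per group and averages them. A minimal sketch of that reduced flow, using hypothetical group data and a stand-in for the library's `nan_mean` helper:

from collections import defaultdict
import math

def nan_mean(values):
    # Stand-in for the library helper: mean over the non-NaN values.
    clean = [v for v in values if not math.isnan(v)]
    return sum(clean) / len(clean) if clean else float("nan")

# Hypothetical instance scores keyed by group_id.
instances = [
    {"group_id": "g1", "accuracy": 1.0},
    {"group_id": "g1", "accuracy": 0.0},
    {"group_id": "g2", "accuracy": 1.0},
]

groups = defaultdict(list)
for inst in instances:
    groups[str(inst["group_id"])].append(inst["accuracy"])  # str(), as in the hunk above

group_scores = [nan_mean(scores) for _, scores in sorted(groups.items())]
print(nan_mean(group_scores))  # mean of per-group means: (0.5 + 1.0) / 2 = 0.75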
 
version.py CHANGED
@@ -1 +1 @@
-version = "1.14.0"
+version = "1.14.1"