John Graham Reynolds committed
Commit b660ba8 · 1 Parent(s): 0b0e7aa

updated compute fn and test cases

Files changed (1)
  1. app.py +24 -7
app.py CHANGED
@@ -7,30 +7,40 @@ from evaluate.utils import infer_gradio_input_types, json_to_string_type, parse_
 from fixed_f1 import FixedF1
 from pathlib import Path
 
-def compute(input: pd.DataFrame):
-
-    metric._compute()
-
 metric = FixedF1()
 
 if isinstance(metric.features, list):
     (feature_names, feature_types) = zip(*metric.features[0].items())
 else:
     (feature_names, feature_types) = zip(*metric.features.items())
-
 gradio_input_types = infer_gradio_input_types(feature_types)
 
 local_path = Path(sys.path[0])
 test_cases = [ {"predictions":[1,2,3,4,5], "references":[1,2,5,4,3]} ] # configure this randomly using randint generator and feature names?
 
+# configure this based on the input type, etc. for launch_gradio_widget
+def compute(input_df: pd.DataFrame, feature_names: tuple[str]):
+
+    predicted = [int(num) for num in input_df[feature_names[0]].to_list()]
+    references = [int(num) for num in input_df[feature_names[1]].to_list()]
+
+    metric.add_batch(predictions=predicted, references=references)
+
+    outputs = metric._compute()
+
+    f"Your metrics are as follows: \n {outputs}"
+
 space = gr.Interface(
     fn=compute,
-    inputs=gr.Dataframe(
+    inputs=[
+        gr.Dataframe(
         headers=feature_names,
         col_count=len(feature_names),
         row_count=5,
         datatype=json_to_string_type(gradio_input_types),
     ),
+        feature_names
+    ],
     outputs=gr.Textbox(label=metric.name),
     description=(
         metric.info.description + "\nIf this is a text-based metric, make sure to wrap your input in double quotes."
@@ -39,7 +49,14 @@ space = gr.Interface(
     title=f"Metric: {metric.name}",
     article=parse_readme(local_path / "README.md"),
     # TODO: load test cases and use them to populate examples
-    examples=[parse_test_cases(test_cases, feature_names, gradio_input_types)]
+    examples=[
+        [
+            # consider how to generalize this
+            parse_test_cases(test_cases, feature_names, gradio_input_types)[0],
+            feature_names
+        ]
+    ],
+    cache_examples=False
 )
 
 space.launch()
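
For reference, a minimal standalone sketch (not part of this commit) of the data flow the new compute() sets up, driven by the committed test case. It assumes the Space's fixed_f1 module is importable, that the metric's feature names are ("predictions", "references") as in the test case, and, like the committed code, that FixedF1's _compute() can draw on examples added via add_batch(); the final print is added here only to surface the string that compute() builds but does not return.

import pandas as pd

from fixed_f1 import FixedF1

metric = FixedF1()
feature_names = ("predictions", "references")

# The same test case the commit wires into the Gradio examples,
# shaped like the DataFrame the gr.Dataframe input hands to compute().
input_df = pd.DataFrame({"predictions": [1, 2, 3, 4, 5], "references": [1, 2, 5, 4, 3]})

# Mirror of the committed compute() body.
predicted = [int(num) for num in input_df[feature_names[0]].to_list()]
references = [int(num) for num in input_df[feature_names[1]].to_list()]
metric.add_batch(predictions=predicted, references=references)
outputs = metric._compute()

print(f"Your metrics are as follows: \n {outputs}")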