John Graham Reynolds committed · f75e383
1 Parent(s): 19d5834
add radio button for selecting averaging method
app.py CHANGED
@@ -19,38 +19,40 @@ local_path = Path(sys.path[0])
 test_cases = [ {"predictions":[1,2,3,4,5], "references":[1,2,5,4,3]} ] # configure this randomly using randint generator and feature names?
 
 # configure this based on the input type, etc. for launch_gradio_widget
-def compute(input_df: pd.DataFrame):
+def compute(input_df: pd.DataFrame, method: str):
+
+    metric = FixedF1(average=method if method != "None" else None)
 
     cols = [col for col in input_df.columns]
-
     predicted = [int(num) for num in input_df[cols[0]].to_list()]
     references = [int(num) for num in input_df[cols[1]].to_list()]
 
     metric.add_batch(predictions=predicted, references=references)
-
     outputs = metric.compute()
 
     f"Your metrics are as follows: \n {outputs}"
 
 space = gr.Interface(
     fn=compute,
-    inputs=gr.Dataframe(
+    inputs=[
+        gr.Dataframe(
             headers=feature_names,
             col_count=len(feature_names),
             row_count=5,
             datatype=json_to_string_type(gradio_input_types),
         ),
+        gr.Radio(
+            ["weighted", "micro", "macro", "None", "binary"],
+            label="Averaging Method",
+            info="Method for averaging the F1 score across labels. `Binary` only works if you are evaluating a binary classification model."
+        )
+    ],
     outputs=gr.Textbox(label=metric.name),
-    description=(
-        metric.info.description + "\nIf this is a text-based metric, make sure to wrap your input in double quotes."
-        " Alternatively you can use a JSON-formatted list as input."
-    ),
+    description=metric.info.description,
     title=f"Metric: {metric.name}",
     article=parse_readme(local_path / "README.md"),
-    # TODO: load test cases and use them to populate examples
     examples=[
-
-        parse_test_cases(test_cases, feature_names, gradio_input_types)
+        [pd.DataFrame(parse_test_cases(test_cases, feature_names, gradio_input_types)[0]), "weighted"],
     ],
     cache_examples=False
 )
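For reference, the `method` string chosen in the new `gr.Radio` feeds the `average` argument of `FixedF1`, with the literal string "None" translated to Python `None` before the metric is constructed. Below is a minimal sketch of what each radio choice computes, assuming `FixedF1` ultimately forwards `average` to scikit-learn's `f1_score` the way `evaluate`'s stock `f1` metric does; the toy labels reuse the `test_cases` entry from the diff.

# A sketch, not code from this Space: FixedF1 is assumed to delegate
# `average` to scikit-learn's f1_score, as evaluate's built-in f1 does.
from sklearn.metrics import f1_score

predictions = [1, 2, 3, 4, 5]   # same toy data as test_cases above
references = [1, 2, 5, 4, 3]

# The radio choice "None" becomes Python None, which yields one F1
# score per label instead of a single aggregate.
for method in ["weighted", "micro", "macro", None]:
    print(method, "->", f1_score(references, predictions, average=method))

# "binary" is left out: per the radio button's info text, it only
# applies to binary classification, and this toy data has five labels.

With five distinct labels and equal support per label, "weighted", "micro", and "macro" should all land on the same score for this toy input; the three methods only diverge once the label distribution is imbalanced.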