virtualmartire committed on
Commit
ce1fac2
1 Parent(s): b9dee86

refactoring...

Files changed (13)
  1. .DS_Store +0 -0
  2. .gitignore +2 -0
  3. app.py +45 -118
  4. assets/.DS_Store +0 -0
  5. banner.png +0 -0
  6. constants.py +16 -12
  7. init.py +0 -93
  8. leaderboard.csv +3 -0
  9. requested_models.txt +3 -0
  10. utils/__init__.py +0 -0
  11. utils/display.py +10 -0
  12. utils/eval_request.py +62 -0
  13. utils_display.py +0 -40
.DS_Store DELETED
Binary file (6.15 kB)
 
.gitignore ADDED
@@ -0,0 +1,2 @@
+ __pycache__
+ .DS_Store
app.py CHANGED
@@ -1,148 +1,75 @@
  import gradio as gr
  import pandas as pd
- import json
- from constants import BANNER, INTRODUCTION_TEXT, CITATION_TEXT, METRICS_TAB_TEXT, DIR_OUTPUT_REQUESTS
- from init import is_model_on_hub, upload_file, load_all_info_from_dataset_hub
- from utils_display import AutoEvalColumn, fields, make_clickable_model, styled_error, styled_message
- from datetime import datetime, timezone

- LAST_UPDATED = "Feb 10th 2024"

- column_names = {
-     "MODEL": "Model",
-     "Avg. WER": "Average WER ⬇️",
-     "RTF": "RTF (1e-3) ⬇️",
-     "AMI WER": "AMI",
-     "Earnings22 WER": "Earnings22",
-     "Gigaspeech WER": "Gigaspeech",
-     "LS Clean WER": "LS Clean",
-     "LS Other WER": "LS Other",
-     "SPGISpeech WER": "SPGISpeech",
-     "Tedlium WER": "Tedlium",
-     "Voxpopuli WER": "Voxpopuli",
-     "Common Voice WER": "Common Voice 9"}

- eval_queue_repo, requested_models, csv_results = load_all_info_from_dataset_hub()

- if not csv_results.exists():
-     raise Exception(f"CSV file {csv_results} does not exist locally")
-
- # Get csv with data and parse columns
- original_df = pd.read_csv(csv_results)
-
- # Formats the columns
- def formatter(x):
-     if type(x) is str:
-         x = x
-     else:
-         x = round(x, 2)
-     return x
-
- for col in original_df.columns:
      if col == "model":
-         original_df[col] = original_df[col].apply(lambda x: x.replace(x, make_clickable_model(x)))
      else:
-         original_df[col] = original_df[col].apply(formatter) # For numerical values
-
- original_df.rename(columns=column_names, inplace=True)
- original_df.sort_values(by='Average WER ⬇️', inplace=True)
-
- COLS = [c.name for c in fields(AutoEvalColumn)]
- TYPES = [c.type for c in fields(AutoEvalColumn)]
-
-
- def request_model(model_text, chbcoco2017):
-
-     # Determine the selected checkboxes
-     dataset_selection = []
-     if chbcoco2017:
-         dataset_selection.append("ESB Datasets tests only")
-
-     if len(dataset_selection) == 0:
-         return styled_error("You need to select at least one dataset")
-
-     base_model_on_hub, error_msg = is_model_on_hub(model_text)

-     if not base_model_on_hub:
-         return styled_error(f"Base model '{model_text}' {error_msg}")
-
-     # Construct the output dictionary
-     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-     required_datasets = ', '.join(dataset_selection)
-     eval_entry = {
-         "date": current_time,
-         "model": model_text,
-         "datasets_selected": required_datasets
-     }
-
-     # Prepare file path
-     DIR_OUTPUT_REQUESTS.mkdir(parents=True, exist_ok=True)
-
-     fn_datasets = '@ '.join(dataset_selection)
-     filename = model_text.replace("/","@") + "@@" + fn_datasets
-     if filename in requested_models:
-         return styled_error(f"A request for this model '{model_text}' and dataset(s) was already made.")
-     try:
-         filename_ext = filename + ".txt"
-         out_filepath = DIR_OUTPUT_REQUESTS / filename_ext
-
-         # Write the results to a text file
-         with open(out_filepath, "w") as f:
-             f.write(json.dumps(eval_entry))
-
-         upload_file(filename, out_filepath)
-
-         # Include file in the list of uploaded files
-         requested_models.append(filename)
-
-         # Remove the local file
-         out_filepath.unlink()

-         return styled_message("🤗 Your request has been submitted and will be evaluated soon!</p>")
-     except Exception as e:
-         return styled_error(f"Error submitting request!")
-
- with gr.Blocks() as demo:
-     gr.HTML(BANNER, elem_id="banner")
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

      with gr.Tabs(elem_classes="tab-buttons") as tabs:
          with gr.TabItem("🏅 Leaderboard", elem_id="od-benchmark-tab-table", id=0):
              leaderboard_table = gr.components.Dataframe(
-                 value=original_df,
-                 datatype=TYPES,
-                 max_rows=None,
-                 elem_id="leaderboard-table",
-                 interactive=False,
-                 visible=True,
-             )

          with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
-             gr.Markdown(METRICS_TAB_TEXT, elem_classes="markdown-text")

-         with gr.TabItem("✉️✨ Request a model here!", elem_id="od-benchmark-tab-table", id=2):
              with gr.Column():
-                 gr.Markdown("# ✉️✨ Request results for a new model here!", elem_classes="markdown-text")
              with gr.Column():
-                 gr.Markdown("Select a dataset:", elem_classes="markdown-text")
              with gr.Column():
-                 model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
-                 chb_coco2017 = gr.Checkbox(label="COCO validation 2017 dataset", visible=False, value=True, interactive=False)
              with gr.Column():
-                 mdw_submission_result = gr.Markdown()
              btn_submitt = gr.Button(value="🚀 Request")
-             btn_submitt.click(request_model,
-                               [model_name_textbox, chb_coco2017],
-                               mdw_submission_result)

-     gr.Markdown(f"Last updated on **{LAST_UPDATED}**", elem_classes="markdown-text")

      with gr.Row():
          with gr.Accordion("📙 Citation", open=False):
              gr.Textbox(
-                 value=CITATION_TEXT, lines=7,
                  label="Copy the BibTeX snippet to cite this source",
                  elem_id="citation-button",
-             ).style(show_copy_button=True)

- demo.launch()
  import gradio as gr
  import pandas as pd

+ import constants
+ from utils import display, eval_request

+ #
+ ##
+ ###
+ ##
+ #

+ leaderboard_df = pd.read_csv("leaderboard.csv")

+ # Format the dataframe
+ for col in leaderboard_df.columns:
      if col == "model":
+         leaderboard_df[col] = leaderboard_df[col].apply(lambda x: x.replace(x, display.make_clickable_model(x)))
      else:
+         leaderboard_df[col] = leaderboard_df[col].apply(display.round_numbers)
+ leaderboard_df.rename(
+     columns={"Average WER": "Average WER ⬇️", "RTF (1e-3)": "RTF (1e-3) ⬇️"},
+     inplace=True,
+ )
+ leaderboard_df.sort_values(by='Average WER ⬇️', inplace=True)

+ with gr.Blocks() as leaderboard_app:

+     gr.HTML(constants.BANNER, elem_id="banner")
+     gr.Markdown(constants.INTRODUCTION_TEXT, elem_classes="markdown-text")

      with gr.Tabs(elem_classes="tab-buttons") as tabs:
+
          with gr.TabItem("🏅 Leaderboard", elem_id="od-benchmark-tab-table", id=0):
              leaderboard_table = gr.components.Dataframe(
+                 value=leaderboard_df,
+                 datatype=constants.COLUMN_DTYPES_LIST,
+                 elem_id="leaderboard-table",
+                 interactive=False,
+                 visible=True,
+             )

          with gr.TabItem("📈 Metrics", elem_id="od-benchmark-tab-table", id=1):
+             gr.Markdown(constants.METRICS_TAB_TEXT, elem_classes="markdown-text")

+         with gr.TabItem("✉️ Request a model here!", elem_id="od-benchmark-tab-table", id=2):
              with gr.Column():
+                 gr.Markdown("# ✉️ Request results for a new model here!", elem_classes="markdown-text")
              with gr.Column():
                  with gr.Column():
+                     model_id = gr.Textbox(label="Model ID (user_name/model_name)")
              with gr.Column():
+                 md_submission_result = gr.Markdown()
              btn_submitt = gr.Button(value="🚀 Request")
+             btn_submitt.click(eval_request.request_model, [model_id], md_submission_result)

+         with gr.TabItem("☢️ Evaluate", elem_id="od-benchmark-tab-table", id=3):
+             with gr.Column():
+                 gr.Markdown("For admins only.", elem_classes="markdown-text")
+             with gr.Column():
+                 md_submission_result = gr.Markdown()
+                 btn_submitt = gr.Button(value="RUN EVALUATION")
+                 # btn_submitt.click(eval_request.request_model, [model_id], md_submission_result)

      with gr.Row():
          with gr.Accordion("📙 Citation", open=False):
              gr.Textbox(
+                 value=constants.CITATION_TEXT, lines=7,
                  label="Copy the BibTeX snippet to cite this source",
                  elem_id="citation-button",
+                 show_label=True,
+                 show_copy_button=True,
+             )

+ leaderboard_app.launch(allowed_paths=["banner.png"])
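
Reviewer note: the refactored app.py above drops the dataclass-driven column plumbing and reads leaderboard.csv directly. The `lambda x: x.replace(x, display.make_clickable_model(x))` is equivalent to applying `display.make_clickable_model` on its own, since `str.replace` swaps the whole string for the link markup. A minimal sketch of the same dataframe preparation outside Gradio, assuming the leaderboard.csv and utils/ files added in this commit:

    # sketch: app.py's dataframe preparation, runnable on its own
    import pandas as pd
    from utils import display

    df = pd.read_csv("leaderboard.csv")
    for col in df.columns:
        if col == "model":
            df[col] = df[col].apply(display.make_clickable_model)  # same effect as the lambda above
        else:
            df[col] = df[col].apply(display.round_numbers)
    df.rename(columns={"Average WER": "Average WER ⬇️", "RTF (1e-3)": "RTF (1e-3) ⬇️"}, inplace=True)
    df.sort_values(by="Average WER ⬇️", inplace=True)
    print(df.iloc[0]["model"])  # nvidia/canary-1b sorts first (lowest average WER)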
assets/.DS_Store DELETED
Binary file (6.15 kB)
 
banner.png ADDED
constants.py CHANGED
@@ -1,15 +1,19 @@
- from pathlib import Path
-
- # Directory where request by models are stored
- DIR_OUTPUT_REQUESTS = Path("requested_models")
- EVAL_REQUESTS_PATH = Path("eval_requests")
-
- ##########################
- #    Text definitions    #
- ##########################
-
- banner_url = "https://huggingface.co/datasets/reach-vb/random-images/resolve/main/asr_leaderboard.png"
- BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"> </div>'

  TITLE = "<html> <head> <style> h1 {text-align: center;} </style> </head> <body> <h1> 🤗 Open Automatic Speech Recognition Leaderboard </b> </body> </html>"

+ #
+ ##
+ ### CONSTANTS
+ ##
+ #
+
+ COLUMN_DTYPES_LIST = ["markdown", "number", "number", "number"]  # in accordance with ./leaderboard.csv
+
+ #
+ ##
+ ### TEXT WALLS
+ ##
+ #
+
+ banner_url = "/file=banner.png"  # gradio file path
+ BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 70%;"> </div>'

  TITLE = "<html> <head> <style> h1 {text-align: center;} </style> </head> <body> <h1> 🤗 Open Automatic Speech Recognition Leaderboard </b> </body> </html>"
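Reviewer note: COLUMN_DTYPES_LIST has four entries, while the leaderboard.csv header added in this commit has twelve columns (model plus eleven numeric metrics). Gradio pairs datatype entries with columns positionally, so if the intent is one dtype per column, a list like the following sketch (an editorial suggestion, not part of the commit) would match the CSV:

    # hypothetical full dtype list: the "model" column renders as markdown,
    # the remaining 11 metric columns as numbers
    COLUMN_DTYPES_LIST = ["markdown"] + ["number"] * 11
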
init.py DELETED
@@ -1,93 +0,0 @@
- import os
- from constants import EVAL_REQUESTS_PATH
- from pathlib import Path
- from huggingface_hub import HfApi, Repository
-
- TOKEN_HUB = os.environ.get("TOKEN_HUB", None)
- QUEUE_REPO = os.environ.get("QUEUE_REPO", None)
- QUEUE_PATH = os.environ.get("QUEUE_PATH", None)
-
- hf_api = HfApi(
-     endpoint="https://huggingface.co",
-     token=TOKEN_HUB,
- )
-
- def load_all_info_from_dataset_hub():
-     eval_queue_repo = None
-     results_csv_path = None
-     requested_models = None
-
-     passed = True
-     if TOKEN_HUB is None:
-         passed = False
-     else:
-         print("Pulling evaluation requests and results.")
-
-         eval_queue_repo = Repository(
-             local_dir=QUEUE_PATH,
-             clone_from=QUEUE_REPO,
-             use_auth_token=TOKEN_HUB,
-             repo_type="dataset",
-         )
-         eval_queue_repo.git_pull()
-
-         # Local directory where dataset repo is cloned + folder with eval requests
-         directory = QUEUE_PATH / EVAL_REQUESTS_PATH
-         requested_models = get_all_requested_models(directory)
-         requested_models = [p.stem for p in requested_models]
-         # Local directory where dataset repo is cloned
-         csv_results = get_csv_with_results(QUEUE_PATH)
-         if csv_results is None:
-             passed = False
-     if not passed:
-         print("No HuggingFace token provided. Skipping evaluation requests and results.")
-
-     return eval_queue_repo, requested_models, csv_results
-
-
- def upload_file(requested_model_name, path_or_fileobj):
-     dest_repo_file = Path(EVAL_REQUESTS_PATH) / path_or_fileobj.name
-     dest_repo_file = str(dest_repo_file)
-     hf_api.upload_file(
-         path_or_fileobj=path_or_fileobj,
-         path_in_repo=str(dest_repo_file),
-         repo_id=QUEUE_REPO,
-         token=TOKEN_HUB,
-         repo_type="dataset",
-         commit_message=f"Add {requested_model_name} to eval queue")
-
- def get_all_requested_models(directory):
-     directory = Path(directory)
-     all_requested_models = list(directory.glob("*.txt"))
-     return all_requested_models
-
- def get_csv_with_results(directory):
-     directory = Path(directory)
-     all_csv_files = list(directory.glob("*.csv"))
-     latest = [f for f in all_csv_files if f.stem.endswith("latest")]
-     if len(latest) != 1:
-         return None
-     return latest[0]
-
-
- def is_model_on_hub(model_name, revision="main") -> bool:
-     try:
-         model_name = model_name.replace(" ","")
-         author = model_name.split("/")[0]
-         model_id = model_name.split("/")[1]
-         if len(author) == 0 or len(model_id) == 0:
-             return False, "is not a valid model name. Please use the format `author/model_name`."
-     except Exception as e:
-         return False, "is not a valid model name. Please use the format `author/model_name`."
-
-     try:
-         models = list(hf_api.list_models(author=author, search=model_id))
-         matched = [model_name for m in models if m.modelId == model_name]
-         if len(matched) != 1:
-             return False, "was not found on the hub!"
-         else:
-             return True, None
-     except Exception as e:
-         print(f"Could not get the model from the hub.: {e}")
-         return False, "was not found on hub!"
leaderboard.csv ADDED
@@ -0,0 +1,3 @@
+ model,Average WER,RTF (1e-3),AMI WER,Earnings22 WER,Gigaspeech WER,LS Clean WER,LS Other WER,SPGISpeech WER,Tedlium WER,Voxpopuli WER,Common Voice WER
+ openai/whisper-large-v3,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1,1.2
+ nvidia/canary-1b,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1
requested_models.txt ADDED
@@ -0,0 +1,3 @@
+ nvidia/parakeet-ctc-0.6b
+ openai/whisper-large
+ distil-whisper/distil-medium.en
utils/__init__.py ADDED
File without changes
utils/display.py ADDED
@@ -0,0 +1,10 @@
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+ def round_numbers(x):
+     if type(x) is str:
+         x = x
+     else:
+         x = round(x, 2)
+     return x
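
A quick usage sketch of the two helpers above (output abbreviated; make_clickable_model returns exactly the anchor markup defined in the diff):

    from utils import display

    html = display.make_clickable_model("openai/whisper-large-v3")
    # -> '<a target="_blank" href="https://huggingface.co/openai/whisper-large-v3" ...>openai/whisper-large-v3</a>'
    display.round_numbers(0.23456)  # -> 0.23
    display.round_numbers("n/a")    # -> "n/a": strings pass through unchanged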
utils/eval_request.py ADDED
@@ -0,0 +1,62 @@
+ import os
+ import huggingface_hub as hf_hub
+
+ TOKEN_HUB = os.environ.get("TOKEN_HUB", None)
+ hf_api = hf_hub.HfApi(
+     endpoint="https://huggingface.co",
+     token=TOKEN_HUB,
+ )
+
+
+ #
+ ##
+ ###
+ ##
+ #
+
+
+ def is_model_on_hub(model_id):
+     """Check if a model is on the hub and return a failure message if not."""
+
+     try:
+         author = model_id.split("/")[0]
+         model_name = model_id.split("/")[1]
+         if len(author) == 0 or len(model_name) == 0:
+             return "is not a valid model name. Please use the format `author/model_name`."
+     except:
+         return "is not a valid model name. Please use the format `author/model_name`."
+
+     try:
+         models = list(hf_api.list_models(author=author, search=model_name))
+         matched = [model_id for m in models if m.modelId == model_id]
+         if len(matched) != 1:
+             return "was not found on the hub!"
+         else:
+             return None
+     except:
+         return "was not found on hub!"
+
+ def request_model(model_id):
+
+     def styled_error(error):
+         return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+     def styled_message(message):
+         return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
+
+     # Check if the model is on the hub
+     error_msg = is_model_on_hub(model_id)
+     if error_msg != None:
+         return styled_error(f"{model_id} {error_msg}")
+
+     # Check if the model was already requested
+     with open("requested_models.txt", "r") as f:
+         requested_models_list = f.read().splitlines()
+     if model_id in requested_models_list:
+         return styled_error(f"A request for {model_id} was already made.")
+
+     # Add the model to the evaluation queue
+     with open("requested_models.txt", "a") as f:
+         f.write(model_id + "\n")
+
+     return styled_message("🤗 Your request has been submitted and will be evaluated as soon as possible!")
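
Two reviewer notes on utils/eval_request.py: in is_model_on_hub, the comprehension `[model_id for m in models if m.modelId == model_id]` is only ever used for its length, so `[m for m in models if m.modelId == model_id]` would say the same thing more directly; and request_model assumes requested_models.txt (added in this commit) sits in the working directory. A minimal local sketch, assuming that file is present and TOKEN_HUB is set so the hub lookup can run:

    # sketch: exercising the request flow from a Python shell
    from utils import eval_request

    print(eval_request.request_model("openai/whisper-large"))  # already in requested_models.txt -> "already made" error
    print(eval_request.request_model("no-slash-here"))         # malformed id -> format error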
utils_display.py DELETED
@@ -1,40 +0,0 @@
- from dataclasses import dataclass
-
- # These classes are for user facing column names, to avoid having to change them
- # all around the code when a modif is needed
- @dataclass
- class ColumnContent:
-     name: str
-     type: str
-
- def fields(raw_class):
-     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
-
- @dataclass(frozen=True)
- class AutoEvalColumn:  # Auto evals column
-     model = ColumnContent("Model", "markdown")
-     avg_wer = ColumnContent("Average WER ⬇️", "number")
-     rtf = ColumnContent("RTF (1e-3) ⬇️", "number")
-     ami_wer = ColumnContent("AMI", "number")
-     e22_wer = ColumnContent("Earnings22", "number")
-     gs_wer = ColumnContent("Gigaspeech", "number")
-     lsc_wer = ColumnContent("LS Clean", "number")
-     lso_wer = ColumnContent("LS Other", "number")
-     ss_wer = ColumnContent("SPGISpeech", "number")
-     tl_wer = ColumnContent("Tedlium", "number")
-     vp_wer = ColumnContent("Voxpopuli", "number")
-     cv_wer = ColumnContent("Common Voice", "number")
-
-
- def make_clickable_model(model_name):
-     link = f"https://huggingface.co/{model_name}"
-     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
-
- def styled_error(error):
-     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
-
- def styled_warning(warn):
-     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
-
- def styled_message(message):
-     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"