ZeroCommand committed
Commit 80ed307 • 1 Parent(s): cbb886a

fix bugs and add logs for leaderboard tab

app_leaderboard.py CHANGED
@@ -5,9 +5,11 @@ from fetch_utils import check_dataset_and_get_config, check_dataset_and_get_spli
 
 def get_records_from_dataset_repo(dataset_id):
     dataset_config = check_dataset_and_get_config(dataset_id)
+
     logging.info(f"Dataset {dataset_id} has configs {dataset_config}")
     dataset_split = check_dataset_and_get_split(dataset_id, dataset_config[0])
     logging.info(f"Dataset {dataset_id} has splits {dataset_split}")
+
     try:
         ds = datasets.load_dataset(dataset_id, dataset_config[0])[dataset_split[0]]
         df = ds.to_pandas()
@@ -40,13 +42,14 @@ def get_types(ds):
 def get_display_df(df):
     # style all elements in the model_id column
     display_df = df.copy()
-    if display_df['model_id'].any():
+    columns = display_df.columns.tolist()
+    if 'model_id' in columns:
         display_df['model_id'] = display_df['model_id'].apply(lambda x: f'<p href="https://huggingface.co/{x}" style="color:blue">🔗{x}</p>')
     # style all elements in the dataset_id column
-    if display_df['dataset_id'].any():
+    if 'dataset_id' in columns:
         display_df['dataset_id'] = display_df['dataset_id'].apply(lambda x: f'<p href="https://huggingface.co/datasets/{x}" style="color:blue">🔗{x}</p>')
     # style all elements in the report_link column
-    if display_df['report_link'].any():
+    if 'report_link' in columns:
         display_df['report_link'] = display_df['report_link'].apply(lambda x: f'<p href="{x}" style="color:blue">🔗{x}</p>')
     return display_df
 
@@ -57,7 +60,7 @@ def get_demo():
     dataset_ids = get_dataset_ids(records)
 
     column_names = records.columns.tolist()
-    default_columns = ['model_id', 'dataset_id', 'total_issue', 'report_link']
+    default_columns = ['model_id', 'dataset_id', 'total_issues', 'report_link']
     # set the default columns to show
     default_df = records[default_columns]
     types = get_types(default_df)
@@ -79,7 +82,7 @@ def get_demo():
         outputs=[leaderboard_df])
     def filter_table(model_id, dataset_id, columns, task):
         # filter the table based on task
-        df = records[(records['hf_pipeline_type'] == task)]
+        df = records[(records['task'] == task)]
         # filter the table based on the model_id and dataset_id
         if model_id:
             df = records[(records['model_id'] == model_id)]
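Note on the get_display_df change: the old guard display_df['model_id'].any() raises a KeyError whenever the column is absent, because the column lookup runs before .any() can return anything; the membership test against display_df.columns never touches a missing column. A minimal standalone sketch (hypothetical data, not from this repo) contrasting the two guards:

import pandas as pd

# Hypothetical frame: has model_id but no report_link column.
df = pd.DataFrame({"model_id": ["some-org/some-model"]})

# Old guard: the column lookup itself raises before .any() runs.
try:
    if df["report_link"].any():
        print("styling report_link")
except KeyError as err:
    print(f"old guard crashed: {err}")

# New guard: a plain membership test skips missing columns safely.
if "report_link" in df.columns.tolist():
    print("styling report_link")
else:
    print("report_link absent, styling skipped")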
app_text_classification.py CHANGED
@@ -11,7 +11,7 @@ from transformers.pipelines import TextClassificationPipeline
 
 from text_classification import get_labels_and_features_from_dataset, check_model, get_example_prediction
 from io_utils import read_scanners, write_scanners, read_inference_type, read_column_mapping, write_column_mapping, write_inference_type
-from wordings import CONFIRM_MAPPING_DETAILS_MD, CONFIRM_MAPPING_DETAILS_FAIL_RAW
+from wordings import INTRODUCTION_MD, CONFIRM_MAPPING_DETAILS_MD, CONFIRM_MAPPING_DETAILS_FAIL_RAW
 
 HF_REPO_ID = 'HF_REPO_ID'
 HF_SPACE_ID = 'SPACE_ID'
@@ -95,7 +95,7 @@ def check_dataset_and_get_split(dataset_id, dataset_config):
 
 def get_demo():
     with gr.Row():
-        gr.Markdown(CONFIRM_MAPPING_DETAILS_MD)
+        gr.Markdown(INTRODUCTION_MD)
     with gr.Row():
         model_id_input = gr.Textbox(
             label="Hugging Face model id",
@@ -117,13 +117,17 @@ def get_demo():
         example_prediction = gr.Label(label='Model Prediction Sample', visible=False)
 
     with gr.Row():
-        column_mappings = []
-        with gr.Column():
-            for _ in range(MAX_LABELS):
-                column_mappings.append(gr.Dropdown(visible=False))
-        with gr.Column():
-            for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
-                column_mappings.append(gr.Dropdown(visible=False))
+        with gr.Accordion(label='Label and Feature Mapping', visible=False, open=False) as column_mapping_accordion:
+            with gr.Row():
+                gr.Markdown(CONFIRM_MAPPING_DETAILS_MD)
+            column_mappings = []
+            with gr.Row():
+                with gr.Column():
+                    for _ in range(MAX_LABELS):
+                        column_mappings.append(gr.Dropdown(visible=False))
+                with gr.Column():
+                    for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
+                        column_mappings.append(gr.Dropdown(visible=False))
 
     with gr.Accordion(label='Model Wrap Advance Config (optional)', open=False):
         run_local = gr.Checkbox(value=True, label="Run in this Space")
@@ -182,7 +186,7 @@ def get_demo():
 
     @gr.on(triggers=[model_id_input.change, dataset_config_input.change, dataset_split_input.change],
            inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input],
-           outputs=[example_input, example_prediction, *column_mappings])
+           outputs=[example_input, example_prediction, column_mapping_accordion, *column_mappings])
     def check_model_and_show_prediction(model_id, dataset_id, dataset_config, dataset_split):
         ppl = check_model(model_id)
         if ppl is None or not isinstance(ppl, TextClassificationPipeline):
@@ -207,12 +211,14 @@ def get_demo():
             return (
                 gr.update(visible=False),
                 gr.update(visible=False),
+                gr.update(visible=False, open=False),
                 *column_mappings
             )
         prediction_input, prediction_output = get_example_prediction(ppl, dataset_id, dataset_config, dataset_split)
         return (
             gr.update(value=prediction_input, visible=True),
             gr.update(value=prediction_output, visible=True),
+            gr.update(open=False),
             *column_mappings
         )
 
@@ -223,10 +229,16 @@ def get_demo():
         inputs=[dataset_id_input, dataset_config_input],
         outputs=[dataset_split_input])
 
+    scanners.change(
+        write_scanners,
+        inputs=scanners
+    )
+
     run_inference.change(
         write_inference_type,
         inputs=[run_inference]
    )
+
     gr.on(
         triggers=[
             run_btn.click,
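Note on the accordion rework: the column-mapping dropdowns now live inside a gr.Accordion whose visibility is driven from the event handler via gr.update(...), so the whole mapping UI stays hidden until a model and dataset are set. A minimal self-contained sketch of that pattern (assumes Gradio 3.x, which the gr.update calls in this diff imply; the component names here are illustrative, not from the repo):

import gradio as gr

with gr.Blocks() as demo:
    # Stand-in for "model and dataset are configured" state.
    ready = gr.Checkbox(value=False, label="Model and dataset configured")
    with gr.Accordion(label="Label and Feature Mapping",
                      visible=False, open=False) as accordion:
        gr.Markdown("mapping dropdowns would live here")

    def toggle(is_ready):
        # Returning gr.update for the accordion output flips its
        # visibility, mirroring check_model_and_show_prediction above.
        return gr.update(visible=is_ready, open=is_ready)

    ready.change(toggle, inputs=[ready], outputs=[accordion])

if __name__ == "__main__":
    demo.launch()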
fetch_utils.py CHANGED
@@ -1,5 +1,6 @@
 import huggingface_hub
 import datasets
+import logging
 
 def check_dataset_and_get_config(dataset_id):
     try:
@@ -12,12 +13,14 @@ def check_dataset_and_get_config(dataset_id):
 def check_dataset_and_get_split(dataset_id, dataset_config):
     try:
         ds = datasets.load_dataset(dataset_id, dataset_config)
-    except Exception:
+    except Exception as e:
         # Dataset may not exist
+        logging.warning(f"Failed to load dataset {dataset_id} with config {dataset_config}: {e}")
         return None
     try:
         splits = list(ds.keys())
         return splits
-    except Exception:
+    except Exception as e:
         # Dataset has no splits
+        logging.warning(f"Dataset {dataset_id} with config {dataset_config} has no splits: {e}")
         return None
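Note on the new log lines: with an unconfigured root logger, Python's last-resort handler does print logging.warning to stderr, but the logging.info calls added in app_leaderboard.py are filtered out until the root level is lowered. A hedged sketch of the one-time setup the Space would need (the format string is illustrative):

import logging

# Without this, only WARNING and above reach stderr; the leaderboard
# tab's logging.info messages would be silently dropped.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
)

logging.info("Dataset %s has configs %s", "some/dataset", ["default"])
logging.warning("Failed to load dataset %s with config %s: %s",
                "some/dataset", "default", "example error")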
wordings.py CHANGED
@@ -1,10 +1,15 @@
-CONFIRM_MAPPING_DETAILS_MD = '''
+INTRODUCTION_MD = '''
 <h1 style="text-align: center;">
-    Giskard Evaluator
+    🐒Giskard Evaluator
 </h1>
 Welcome to Giskard Evaluator Space! Get your report immediately by simply input your model id and dataset id below. Follow our leads and improve your model in no time.
 '''
-
+CONFIRM_MAPPING_DETAILS_MD = '''
+<h1 style="text-align: center;">
+    Confirm Pre-processing Details
+</h1>
+Please confirm the pre-processing details below. If you are not sure, please double check your model and dataset.
+'''
 CONFIRM_MAPPING_DETAILS_FAIL_MD = '''
 <h1 style="text-align: center;">
     Confirm Pre-processing Details