inoki-giskard ZeroCommand committed on
Commit 8f114e2
1 Parent(s): 2694247

wording-layout-fix-QA (#183)


- fix wordings and minor layout issue (0fe497debeae819014bf9009a5b3bcce8eb6c285)
- change wording link (1c03709ce7ad4a8359a43e42015844536fd4544f)
- remove inference api checkbox (4a799a0e10385bb88785ebad169ff17bc2fee45a)


Co-authored-by: zcy <ZeroCommand@users.noreply.huggingface.co>

app_debug.py CHANGED
@@ -63,9 +63,9 @@ def get_queue_status():
         current = pipe.current
         if current is None:
             current = "None"
-        return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Jobs in queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
+        return f'<div style="padding-top: 5%">Current job: {html.escape(current)} <br/> Job queue: <br/> {"".join(get_jobs_info_in_queue())}</div>'
     else:
-        return '<div style="padding-top: 5%">No jobs in queue, please submit an evaluation task from another tab.</div>'
+        return '<div style="padding-top: 5%">No jobs waiting, please submit an evaluation task from Text-Classification tab.</div>'


 def get_demo():
app_leaderboard.py CHANGED
@@ -96,25 +96,25 @@ def get_demo(leaderboard_tab):
     display_df = get_display_df(default_df) # the styled dataframe to display

     with gr.Row():
-        with gr.Column():
-            issue_columns_select = gr.CheckboxGroup(
-                label="Issue Columns",
-                choices=issue_columns,
-                value=[],
-                interactive=True,
-            )
         with gr.Column():
             info_columns_select = gr.CheckboxGroup(
                 label="Info Columns",
                 choices=info_columns,
                 value=default_columns,
                 interactive=True,
+            )
+        with gr.Column():
+            issue_columns_select = gr.CheckboxGroup(
+                label="Issue Columns",
+                choices=issue_columns,
+                value=[],
+                interactive=True,
             )

     with gr.Row():
         task_select = gr.Dropdown(
             label="Task",
-            choices=["text_classification", "tabular"],
+            choices=["text_classification"],
             value="text_classification",
             interactive=True,
         )
app_text_classification.py CHANGED
@@ -92,10 +92,8 @@ def get_demo():
     for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
         column_mappings.append(gr.Dropdown(visible=False))

-    with gr.Accordion(label="Model Wrap Advance Config", open=True):
+    with gr.Accordion(label="Model Wrap Advanced Config", open=True):
         gr.HTML(USE_INFERENCE_API_TIP)
-
-        run_inference = gr.Checkbox(value=True, label="Run with Inference API")
         inference_token = gr.Textbox(
             placeholder="hf_xxxxxxxxxxxxxxxxxxxx",
             value="",
@@ -111,7 +109,7 @@ def get_demo():
         outputs=[inference_token_info],
     )

-    with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
+    with gr.Accordion(label="Scanner Advanced Config (optional)", open=False):
         scanners = gr.CheckboxGroup(visible=True)

     @gr.on(triggers=[uid_label.change], inputs=[uid_label], outputs=[scanners])
@@ -145,7 +143,7 @@ def get_demo():
     with gr.Row():
         logs = gr.Textbox(
             value=CHECK_LOG_SECTION_RAW,
-            label="Giskard Bot Evaluation Guide:",
+            label="Log",
             visible=False,
             every=0.5,
         )
@@ -156,7 +154,7 @@ def get_demo():
     gr.on(
         triggers=[model_id_input.change],
         fn=get_related_datasets_from_leaderboard,
-        inputs=[model_id_input],
+        inputs=[model_id_input, dataset_id_input],
         outputs=[dataset_id_input],
     ).then(
         fn=check_dataset,
@@ -233,7 +231,6 @@ def get_demo():
             dataset_config_input,
             dataset_split_input,
             uid_label,
-            run_inference,
             inference_token,
         ],
         outputs=[
@@ -257,7 +254,6 @@ def get_demo():
             dataset_id_input,
             dataset_config_input,
             dataset_split_input,
-            run_inference,
             inference_token,
             uid_label,
         ],
@@ -274,14 +270,12 @@ def get_demo():

     gr.on(
         triggers=[
-            run_inference.input,
             inference_token.input,
             scanners.input,
         ],
         fn=enable_run_btn,
         inputs=[
             uid_label,
-            run_inference,
             inference_token,
             model_id_input,
             dataset_id_input,
@@ -296,7 +290,6 @@ def get_demo():
         fn=enable_run_btn,
         inputs=[
             uid_label,
-            run_inference,
             inference_token,
             model_id_input,
             dataset_id_input,
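
Removing the "Run with Inference API" checkbox means `run_inference` has to disappear from every trigger list, inputs list, and callback signature at once, otherwise Gradio reports a mismatch between the declared inputs and the callback's parameters. A minimal sketch of the wiring pattern this commit keeps consistent (component and function names here are illustrative, not the app's exact ones):

import gradio as gr

def enable_run_btn(inference_token, model_id):
    # The run button becomes clickable only once a token and a model id are present.
    ready = inference_token != "" and model_id != ""
    return gr.update(interactive=ready)

with gr.Blocks() as demo:
    inference_token = gr.Textbox(label="Hugging Face token", placeholder="hf_xxxxxxxxxxxxxxxxxxxx")
    model_id = gr.Textbox(label="Model id")
    run_btn = gr.Button("Run evaluation", interactive=False)

    # The triggers list, the inputs list, and the callback signature must all
    # drop a component together when it is removed from the UI.
    gr.on(
        triggers=[inference_token.input, model_id.input],
        fn=enable_run_btn,
        inputs=[inference_token, model_id],
        outputs=[run_btn],
    )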
run_jobs.py CHANGED
@@ -50,7 +50,6 @@ def prepare_env_and_get_command(
     d_id,
     config,
     split,
-    inference,
     inference_token,
     uid,
     label_mapping,
@@ -60,10 +59,6 @@ def prepare_env_and_get_command(
     if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
         leaderboard_dataset = LEADERBOARD

-    inference_type = "hf_pipeline"
-    if inference and inference_token:
-        inference_type = "hf_inference_api"
-
     executable = "giskard_scanner"
     try:
         # Copy the current requirements (might be changed)
@@ -100,7 +95,7 @@ def prepare_env_and_get_command(
         "--scan_config",
         get_submitted_yaml_path(uid),
         "--inference_type",
-        inference_type,
+        "hf_inference_api",
         "--inference_api_token",
         inference_token,
     ]
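
With the checkbox gone, the `hf_pipeline` fallback no longer exists and the scanner is always pointed at the Inference API. A hypothetical helper showing the shape of the argument list after this change (only `--inference_type` and its neighbouring flags are taken from the diff above; the function name and standalone form are illustrative):

def build_inference_args(scan_config_path: str, inference_token: str) -> list[str]:
    # Mirrors the fragment in prepare_env_and_get_command: the inference type is
    # now hard-coded to "hf_inference_api" instead of being derived from a checkbox.
    return [
        "--scan_config",
        scan_config_path,
        "--inference_type",
        "hf_inference_api",
        "--inference_api_token",
        inference_token,
    ]

print(build_inference_args("submitted/example.yaml", "hf_xxxxxxxxxxxxxxxxxxxx"))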
text_classification_ui_helpers.py CHANGED
@@ -43,7 +43,7 @@ MAX_FEATURES = 20
 ds_dict = None
 ds_config = None

-def get_related_datasets_from_leaderboard(model_id):
+def get_related_datasets_from_leaderboard(model_id, dataset_id_input):
     records = leaderboard.records
     model_id = strip_model_id_from_url(model_id)
     model_records = records[records["model_id"] == model_id]
@@ -52,7 +52,10 @@ def get_related_datasets_from_leaderboard(model_id):
     if len(datasets_unique) == 0:
         return gr.update(choices=[])

-    return gr.update(choices=datasets_unique)
+    if dataset_id_input in datasets_unique:
+        return gr.update(choices=datasets_unique)
+
+    return gr.update(choices=datasets_unique, value="")


 logger = logging.getLogger(__file__)
@@ -249,7 +252,6 @@ def align_columns_and_show_prediction(
     dataset_config,
     dataset_split,
     uid,
-    run_inference,
     inference_token,
 ):
     model_id = strip_model_id_from_url(model_id)
@@ -344,7 +346,7 @@ def align_columns_and_show_prediction(
             gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
             gr.update(value=prediction_response, visible=True),
             gr.update(visible=True, open=True),
-            gr.update(interactive=(run_inference and inference_token != "")),
+            gr.update(interactive=(inference_token != "")),
             "",
             *column_mappings,
         )
@@ -354,7 +356,7 @@ def align_columns_and_show_prediction(
             gr.update(value=prediction_input, lines=min(len(prediction_input)//225 + 1, 5), visible=True),
             gr.update(value=prediction_response, visible=True),
             gr.update(visible=True, open=False),
-            gr.update(interactive=(run_inference and inference_token != "")),
+            gr.update(interactive=(inference_token != "")),
             "",
             *column_mappings,
         )
@@ -372,8 +374,8 @@ def check_column_mapping_keys_validity(all_mappings):

     return True

-def enable_run_btn(uid, run_inference, inference_token, model_id, dataset_id, dataset_config, dataset_split):
-    if not run_inference or inference_token == "":
+def enable_run_btn(uid, inference_token, model_id, dataset_id, dataset_config, dataset_split):
+    if inference_token == "":
         logger.warn("Inference API is not enabled")
         return gr.update(interactive=False)
     if model_id == "" or dataset_id == "" or dataset_config == "" or dataset_split == "":
@@ -419,7 +421,7 @@ def show_hf_token_info(token):
         return gr.update(visible=True)
     return gr.update(visible=False)

-def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
+def try_submit(m_id, d_id, config, split, inference_token, uid):
     all_mappings = read_column_mapping(uid)
     if not check_column_mapping_keys_validity(all_mappings):
         return (gr.update(interactive=True), gr.update(visible=False))
@@ -437,7 +439,6 @@ def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
         d_id,
         config,
         split,
-        inference,
         inference_token,
         uid,
         label_mapping,
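
The new `dataset_id_input` parameter lets the dataset dropdown keep the user's current selection when it is still one of the model's leaderboard datasets, and clear it otherwise. A simplified, standalone sketch of that decision logic (the function below is illustrative; the real one also filters `leaderboard.records` with pandas):

import gradio as gr

def update_dataset_dropdown(datasets_unique: list, current_selection: str):
    # Same branching as the updated get_related_datasets_from_leaderboard.
    if len(datasets_unique) == 0:
        return gr.update(choices=[])
    if current_selection in datasets_unique:
        # The previously selected dataset is still valid for this model: keep it.
        return gr.update(choices=datasets_unique)
    # Otherwise reset the value so a stale dataset id is not submitted.
    return gr.update(choices=datasets_unique, value="")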
wordings.py CHANGED
@@ -1,7 +1,9 @@
 INTRODUCTION_MD = """
+<div style="display: flex; justify-content: center;">
 <h1 style="text-align: center;">
     🐢Giskard Evaluator - Text Classification
 </h1>
+</div>
 Welcome to the Giskard Evaluator Space! Get a model vulnerability report immediately by simply sharing your model and dataset id below.
 You can also checkout our library documentation <a href="https://docs.giskard.ai/en/latest/getting_started/quickstart/index.html">here</a>.
 """
@@ -26,9 +28,7 @@ CHECK_CONFIG_OR_SPLIT_RAW = """
     Please check your dataset config or split.
 """

-CHECK_LOG_SECTION_RAW = """
-Your have successfully submitted a Giskard evaluation. Further details are available in the Logs tab. You can find your report will be posted to your model's community discussion.
-"""
+CHECK_LOG_SECTION_RAW = """Your have successfully submitted a Giskard evaluation job. Further details are available in the Logs tab. You can find your report posted in your model's community discussion section."""

 PREDICTION_SAMPLE_MD = """
 <h1 style="text-align: center;">
@@ -58,7 +58,7 @@ USE_INFERENCE_API_TIP = """
     <a href="https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task">
         Hugging Face Inference API
     </a>
-    . Please input your <a href="https://huggingface.co/settings/tokens">Hugging Face token</a> to do so. You can find it <a href="https://huggingface.co/settings/tokens">here</a>.
+    . Please input your <a href="https://huggingface.co/docs/hub/security-tokens#user-access-tokens">Hugging Face token</a> to do so. You can find it <a href="https://huggingface.co/settings/tokens">here</a>.
 """

 HF_TOKEN_INVALID_STYLED= """