inoki-giskard committed on
Commit
85f4771
1 Parent(s): 8c47a22

Remove run_local and add tips

Browse files
app_text_classification.py CHANGED
@@ -7,7 +7,6 @@ from text_classification_ui_helpers import (
7
  get_related_datasets_from_leaderboard,
8
  align_columns_and_show_prediction,
9
  check_dataset,
10
- deselect_run_inference,
11
  precheck_model_ds_enable_example_btn,
12
  select_run_mode,
13
  try_submit,
@@ -81,14 +80,37 @@ def get_demo():
81
  column_mappings.append(gr.Dropdown(visible=False))
82
 
83
  with gr.Accordion(label="Model Wrap Advance Config", open=True):
84
- run_inference = gr.Checkbox(value=True, label="Run with Inference API")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
  inference_token = gr.Textbox(
 
86
  value="",
87
  label="HF Token for Inference API",
88
  visible=True,
89
  interactive=True,
90
  )
91
- run_local = gr.Checkbox(value=False, label="Run Locally with Pipeline [Slow]")
92
 
93
 
94
  with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
@@ -144,13 +166,7 @@ def get_demo():
144
  run_inference.change(
145
  select_run_mode,
146
  inputs=[run_inference],
147
- outputs=[inference_token, run_local],
148
- )
149
-
150
- run_local.change(
151
- deselect_run_inference,
152
- inputs=[run_local],
153
- outputs=[inference_token, run_inference],
154
  )
155
 
156
  gr.on(
@@ -230,7 +246,6 @@ def get_demo():
230
  dataset_id_input,
231
  dataset_config_input,
232
  dataset_split_input,
233
- run_local,
234
  run_inference,
235
  inference_token,
236
  uid_label,
@@ -248,7 +263,6 @@ def get_demo():
248
  gr.on(
249
  triggers=[
250
  run_inference.input,
251
- run_local.input,
252
  inference_token.input,
253
  scanners.input,
254
  ],
 
7
  get_related_datasets_from_leaderboard,
8
  align_columns_and_show_prediction,
9
  check_dataset,
 
10
  precheck_model_ds_enable_example_btn,
11
  select_run_mode,
12
  try_submit,
 
80
  column_mappings.append(gr.Dropdown(visible=False))
81
 
82
  with gr.Accordion(label="Model Wrap Advance Config", open=True):
83
+ run_inference = gr.Checkbox(
84
+ value=True,
85
+ label="Run with HF Inference API"
86
+ )
87
+ gr.HTML(
88
+ value="""
89
+ We recommend to use
90
+ <a href="https://huggingface.co/docs/api-inference/detailed_parameters#text-classification-task">
91
+ Hugging Face Inference API
92
+ </a>
93
+ for the evaluation,
94
+ which requires your <a href="https://huggingface.co/settings/tokens">HF token</a>.
95
+ <br/>
96
+ Otherwise, an
97
+ <a href="https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.TextClassificationPipeline">
98
+ HF pipeline
99
+ </a>
100
+ will be created and run in this Space. It takes more time to get the result.
101
+ <br/>
102
+ <b>
103
+ Do not worry, your HF token is only used in this Space for your evaluation.
104
+ </b>
105
+ """,
106
+ )
107
  inference_token = gr.Textbox(
108
+ placeholder="hf-xxxxxxxxxxxxxxxxxxxx",
109
  value="",
110
  label="HF Token for Inference API",
111
  visible=True,
112
  interactive=True,
113
  )
 
114
 
115
 
116
  with gr.Accordion(label="Scanner Advance Config (optional)", open=False):
 
166
  run_inference.change(
167
  select_run_mode,
168
  inputs=[run_inference],
169
+ outputs=[inference_token],
 
 
 
 
 
 
170
  )
171
 
172
  gr.on(
 
246
  dataset_id_input,
247
  dataset_config_input,
248
  dataset_split_input,
 
249
  run_inference,
250
  inference_token,
251
  uid_label,
 
263
  gr.on(
264
  triggers=[
265
  run_inference.input,
 
266
  inference_token.input,
267
  scanners.input,
268
  ],
text_classification_ui_helpers.py CHANGED
@@ -105,16 +105,9 @@ def check_dataset(dataset_id, dataset_config=None, dataset_split=None):
105
 
106
  def select_run_mode(run_inf):
107
  if run_inf:
108
- return (gr.update(visible=True), gr.update(value=False))
109
  else:
110
- return (gr.update(visible=False), gr.update(value=True))
111
-
112
-
113
- def deselect_run_inference(run_local):
114
- if run_local:
115
- return (gr.update(visible=False), gr.update(value=False))
116
- else:
117
- return (gr.update(visible=True), gr.update(value=True))
118
 
119
 
120
  def write_column_mapping_to_config(uid, *labels):
@@ -320,7 +313,7 @@ def construct_label_and_feature_mapping(all_mappings):
320
  return label_mapping, feature_mapping
321
 
322
 
323
- def try_submit(m_id, d_id, config, split, local, inference, inference_token, uid):
324
  all_mappings = read_column_mapping(uid)
325
  check_column_mapping_keys_validity(all_mappings)
326
  label_mapping, feature_mapping = construct_label_and_feature_mapping(all_mappings)
@@ -329,8 +322,7 @@ def try_submit(m_id, d_id, config, split, local, inference, inference_token, uid
329
  if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
330
  leaderboard_dataset = LEADERBOARD
331
 
332
- if local:
333
- inference_type = "hf_pipeline"
334
  if inference and inference_token:
335
  inference_type = "hf_inference_api"
336
 
 
105
 
106
  def select_run_mode(run_inf):
107
  if run_inf:
108
+ return gr.update(visible=True)
109
  else:
110
+ return gr.update(visible=False)
 
 
 
 
 
 
 
111
 
112
 
113
  def write_column_mapping_to_config(uid, *labels):
 
313
  return label_mapping, feature_mapping
314
 
315
 
316
+ def try_submit(m_id, d_id, config, split, inference, inference_token, uid):
317
  all_mappings = read_column_mapping(uid)
318
  check_column_mapping_keys_validity(all_mappings)
319
  label_mapping, feature_mapping = construct_label_and_feature_mapping(all_mappings)
 
322
  if os.environ.get("SPACE_ID") == "giskardai/giskard-evaluator":
323
  leaderboard_dataset = LEADERBOARD
324
 
325
+ inference_type = "hf_pipeline"
 
326
  if inference and inference_token:
327
  inference_type = "hf_inference_api"
328