ZeroCommand committed on
Commit
7f4008b
1 Parent(s): f227810

fix log refresh

Browse files
app.py CHANGED
@@ -11,7 +11,7 @@ if threading.current_thread() is not threading.main_thread():
11
  try:
12
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="green")) as demo:
13
  with gr.Tab("Text Classification"):
14
- get_demo_text_classification()
15
  with gr.Tab("Leaderboard"):
16
  get_demo_leaderboard()
17
 
 
11
  try:
12
  with gr.Blocks(theme=gr.themes.Soft(primary_hue="green")) as demo:
13
  with gr.Tab("Text Classification"):
14
+ get_demo_text_classification(demo)
15
  with gr.Tab("Leaderboard"):
16
  get_demo_leaderboard()
17
 
app_text_classification.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
  import uuid
3
- from io_utils import read_scanners, write_scanners, read_inference_type, write_inference_type
4
  from wordings import INTRODUCTION_MD, CONFIRM_MAPPING_DETAILS_MD
5
  from text_classification_ui_helpers import try_submit, check_dataset_and_get_config, check_dataset_and_get_split, check_model_and_show_prediction, write_column_mapping_to_config, get_logs_file
6
 
@@ -11,126 +11,125 @@ EXAMPLE_MODEL_ID = 'cardiffnlp/twitter-roberta-base-sentiment-latest'
11
  EXAMPLE_DATA_ID = 'tweet_eval'
12
  CONFIG_PATH='./config.yaml'
13
 
14
def get_demo():
    """Build the text-classification evaluation UI inside its own Blocks context.

    Lays out the model/dataset inputs, column-mapping dropdowns, advanced
    config accordions and the run button, then wires every component to the
    helper callbacks. NOTE(review): does not return `demo`; presumably relies
    on Gradio's implicit-context rendering — confirm against the caller.
    """
    with gr.Blocks() as demo:
        # --- introduction ---
        with gr.Row():
            gr.Markdown(INTRODUCTION_MD)

        # --- model / dataset id inputs ---
        with gr.Row():
            model_id_input = gr.Textbox(
                label="Hugging Face model id",
                placeholder=EXAMPLE_MODEL_ID + " (press enter to confirm)",
            )
            dataset_id_input = gr.Textbox(
                label="Hugging Face Dataset id",
                placeholder=EXAMPLE_DATA_ID + " (press enter to confirm)",
            )

        # Config/split dropdowns are revealed once the dataset id is validated.
        with gr.Row():
            dataset_config_input = gr.Dropdown(label='Dataset Config', visible=False)
            dataset_split_input = gr.Dropdown(label='Dataset Split', visible=False)

        # Sample input/prediction preview, hidden until a model is checked.
        with gr.Row():
            example_input = gr.Markdown('Example Input', visible=False)
        with gr.Row():
            example_prediction = gr.Label(label='Model Prediction Sample', visible=False)

        # --- label/feature mapping accordion ---
        with gr.Row():
            with gr.Accordion(label='Label and Feature Mapping', visible=False, open=False) as column_mapping_accordion:
                with gr.Row():
                    gr.Markdown(CONFIRM_MAPPING_DETAILS_MD)
                column_mappings = []
                with gr.Row():
                    # First MAX_LABELS slots are label dropdowns, the rest feature dropdowns.
                    with gr.Column():
                        for _ in range(MAX_LABELS):
                            column_mappings.append(gr.Dropdown(visible=False))
                    with gr.Column():
                        for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
                            column_mappings.append(gr.Dropdown(visible=False))

        # --- advanced config accordions ---
        with gr.Accordion(label='Model Wrap Advance Config (optional)', open=False):
            run_local = gr.Checkbox(value=True, label="Run in this Space")
            use_inference = read_inference_type('./config.yaml') == 'hf_inference_api'
            run_inference = gr.Checkbox(value=use_inference, label="Run with Inference API")

        with gr.Accordion(label='Scanner Advance Config (optional)', open=False):
            selected = read_scanners('./config.yaml')
            # data_leakage is offered as a choice but left out of the default
            # selection: it barely raises issues and fires too many requests
            # against the inference API, triggering rate-limit errors.
            scan_config = selected + ['data_leakage']
            scanners = gr.CheckboxGroup(choices=scan_config, value=selected, label='Scan Settings', visible=True)

        with gr.Row():
            run_btn = gr.Button(
                "Get Evaluation Result",
                variant="primary",
                interactive=True,
                size="lg",
            )

        # --- per-session log panel, refreshed by polling ---
        with gr.Row():
            uid = uuid.uuid4()
            uid_label = gr.Textbox(label="Evaluation ID:", value=uid, visible=False)
            logs = gr.Textbox(label="Giskard Bot Evaluation Log:", visible=False)
            demo.load(get_logs_file, uid_label, logs, every=0.5)

        # --- event wiring ---
        # Persist any mapping change back into the config file.
        gr.on(triggers=[label.change for label in column_mappings],
              fn=write_column_mapping_to_config,
              inputs=[dataset_id_input, dataset_config_input, dataset_split_input, *column_mappings])

        # Re-validate the model and refresh the prediction preview whenever the
        # model id or the dataset config/split changes.
        gr.on(triggers=[model_id_input.change, dataset_config_input.change, dataset_split_input.change],
              fn=check_model_and_show_prediction,
              inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input],
              outputs=[example_input, example_prediction, column_mapping_accordion, *column_mappings])

        dataset_id_input.blur(check_dataset_and_get_config, dataset_id_input, dataset_config_input)

        dataset_config_input.change(
            check_dataset_and_get_split,
            inputs=[dataset_id_input, dataset_config_input],
            outputs=[dataset_split_input])

        scanners.change(
            write_scanners,
            inputs=scanners
        )

        run_inference.change(
            write_inference_type,
            inputs=[run_inference]
        )

        gr.on(
            triggers=[
                run_btn.click,
            ],
            fn=try_submit,
            inputs=[
                model_id_input,
                dataset_id_input,
                dataset_config_input,
                dataset_split_input,
                run_local,
                uid_label],
            outputs=[run_btn, logs])

        def enable_run_btn():
            # Re-enable the run button after any input changes.
            return gr.update(interactive=True)

        gr.on(
            triggers=[
                model_id_input.change,
                dataset_config_input.change,
                dataset_split_input.change,
                run_inference.change,
                run_local.change,
                scanners.change],
            fn=enable_run_btn,
            inputs=None,
            outputs=[run_btn])

        gr.on(
            triggers=[label.change for label in column_mappings],
            fn=enable_run_btn,
            inputs=None,
            outputs=[run_btn])
1
  import gradio as gr
2
  import uuid
3
+ from io_utils import read_scanners, write_scanners, read_inference_type, write_inference_type, get_logs_file
4
  from wordings import INTRODUCTION_MD, CONFIRM_MAPPING_DETAILS_MD
5
  from text_classification_ui_helpers import try_submit, check_dataset_and_get_config, check_dataset_and_get_split, check_model_and_show_prediction, write_column_mapping_to_config, get_logs_file
6
 
 
11
  EXAMPLE_DATA_ID = 'tweet_eval'
12
  CONFIG_PATH='./config.yaml'
13
 
14
def get_demo(demo):
    """Build the text-classification evaluation UI inside the caller's Blocks.

    Unlike the previous version this no longer opens its own `gr.Blocks`
    context: the enclosing `demo` is passed in so that `demo.load` can poll
    the log file while the components render into the caller's tab.

    Args:
        demo: the enclosing gr.Blocks instance (needed for `demo.load`).
    """
    # --- introduction ---
    with gr.Row():
        gr.Markdown(INTRODUCTION_MD)

    # --- model / dataset id inputs ---
    with gr.Row():
        model_id_input = gr.Textbox(
            label="Hugging Face model id",
            placeholder=EXAMPLE_MODEL_ID + " (press enter to confirm)",
        )
        dataset_id_input = gr.Textbox(
            label="Hugging Face Dataset id",
            placeholder=EXAMPLE_DATA_ID + " (press enter to confirm)",
        )

    # Config/split dropdowns are revealed once the dataset id is validated.
    with gr.Row():
        dataset_config_input = gr.Dropdown(label='Dataset Config', visible=False)
        dataset_split_input = gr.Dropdown(label='Dataset Split', visible=False)

    # Sample input/prediction preview, hidden until a model is checked.
    with gr.Row():
        example_input = gr.Markdown('Example Input', visible=False)
    with gr.Row():
        example_prediction = gr.Label(label='Model Prediction Sample', visible=False)

    # --- label/feature mapping accordion ---
    with gr.Row():
        with gr.Accordion(label='Label and Feature Mapping', visible=False, open=False) as column_mapping_accordion:
            with gr.Row():
                gr.Markdown(CONFIRM_MAPPING_DETAILS_MD)
            column_mappings = []
            with gr.Row():
                # First MAX_LABELS slots are label dropdowns, the rest feature dropdowns.
                with gr.Column():
                    for _ in range(MAX_LABELS):
                        column_mappings.append(gr.Dropdown(visible=False))
                with gr.Column():
                    for _ in range(MAX_LABELS, MAX_LABELS + MAX_FEATURES):
                        column_mappings.append(gr.Dropdown(visible=False))

    # --- advanced config accordions ---
    with gr.Accordion(label='Model Wrap Advance Config (optional)', open=False):
        run_local = gr.Checkbox(value=True, label="Run in this Space")
        use_inference = read_inference_type('./config.yaml') == 'hf_inference_api'
        run_inference = gr.Checkbox(value=use_inference, label="Run with Inference API")

    with gr.Accordion(label='Scanner Advance Config (optional)', open=False):
        selected = read_scanners('./config.yaml')
        # data_leakage is offered as a choice but left out of the default
        # selection: it barely raises issues and fires too many requests
        # against the inference API, triggering rate-limit errors.
        scan_config = selected + ['data_leakage']
        scanners = gr.CheckboxGroup(choices=scan_config, value=selected, label='Scan Settings', visible=True)

    with gr.Row():
        run_btn = gr.Button(
            "Get Evaluation Result",
            variant="primary",
            interactive=True,
            size="lg",
        )

    # --- per-session log panel, refreshed by polling every 0.5s ---
    with gr.Row():
        uid = uuid.uuid4()
        uid_label = gr.Textbox(label="Evaluation ID:", value=uid, visible=False, interactive=False)
        logs = gr.Textbox(label="Giskard Bot Evaluation Log:", visible=False)
        demo.load(get_logs_file, uid_label, logs, every=0.5)

    # --- event wiring ---
    # Persist any mapping change back into the config file.
    gr.on(triggers=[label.change for label in column_mappings],
          fn=write_column_mapping_to_config,
          inputs=[dataset_id_input, dataset_config_input, dataset_split_input, *column_mappings])

    # Re-validate the model and refresh the prediction preview whenever the
    # model id or the dataset config/split changes.
    gr.on(triggers=[model_id_input.change, dataset_config_input.change, dataset_split_input.change],
          fn=check_model_and_show_prediction,
          inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input],
          outputs=[example_input, example_prediction, column_mapping_accordion, *column_mappings])

    dataset_id_input.blur(check_dataset_and_get_config, dataset_id_input, dataset_config_input)

    dataset_config_input.change(
        check_dataset_and_get_split,
        inputs=[dataset_id_input, dataset_config_input],
        outputs=[dataset_split_input])

    scanners.change(
        write_scanners,
        inputs=scanners
    )

    run_inference.change(
        write_inference_type,
        inputs=[run_inference]
    )

    gr.on(
        triggers=[
            run_btn.click,
        ],
        fn=try_submit,
        inputs=[
            model_id_input,
            dataset_id_input,
            dataset_config_input,
            dataset_split_input,
            run_local,
            uid_label],
        outputs=[run_btn, logs])

    def enable_run_btn():
        # Re-enable the run button after any input changes.
        return gr.update(interactive=True)

    gr.on(
        triggers=[
            model_id_input.change,
            dataset_config_input.change,
            dataset_split_input.change,
            run_inference.change,
            run_local.change,
            scanners.change],
        fn=enable_run_btn,
        inputs=None,
        outputs=[run_btn])

    gr.on(
        triggers=[label.change for label in column_mappings],
        fn=enable_run_btn,
        inputs=None,
        outputs=[run_btn])
io_utils.py CHANGED
@@ -52,7 +52,8 @@ def read_column_mapping(path):
52
  column_mapping = {}
53
  with open(path, "r") as f:
54
  config = yaml.load(f, Loader=yaml.FullLoader)
55
- column_mapping = config.get("column_mapping", dict())
 
56
  return column_mapping
57
 
58
  # write column mapping to yaml file
@@ -77,6 +78,13 @@ def convert_column_mapping_to_json(df, label=""):
77
  column_mapping[label].append(row.tolist())
78
  return column_mapping
79
 
 
 
 
 
 
 
 
80
  def write_log_to_user_file(id, log):
81
  with open(f"./tmp/{id}_log", "a") as f:
82
  f.write(log)
 
52
  column_mapping = {}
53
  with open(path, "r") as f:
54
  config = yaml.load(f, Loader=yaml.FullLoader)
55
+ if config:
56
+ column_mapping = config.get("column_mapping", dict())
57
  return column_mapping
58
 
59
  # write column mapping to yaml file
 
78
  column_mapping[label].append(row.tolist())
79
  return column_mapping
80
 
81
def get_logs_file(uid):
    """Return the accumulated evaluation log for run `uid`.

    Reads ./tmp/{uid}_log (the file write_log_to_user_file appends to) and
    returns its full contents, or a placeholder message when the file cannot
    be read yet (it is only created once the evaluation starts logging).

    Args:
        uid: evaluation id used as the log-file name prefix.

    Returns:
        The log text, or "Log file does not exist" on any read failure.
    """
    try:
        # `with` guarantees the handle is closed. The original leaked one
        # open file descriptor per call, and this function is polled
        # repeatedly by the UI (demo.load(..., every=0.5)).
        with open(f"./tmp/{uid}_log", "r") as f:
            return f.read()
    except Exception:
        return "Log file does not exist"
+
88
  def write_log_to_user_file(id, log):
89
  with open(f"./tmp/{id}_log", "a") as f:
90
  f.write(log)
text_classification_ui_helpers.py CHANGED
@@ -43,6 +43,9 @@ def write_column_mapping_to_config(dataset_id, dataset_config, dataset_split, *l
43
  labels = [*labels]
44
  all_mappings = read_column_mapping(CONFIG_PATH)
45
 
 
 
 
46
  if "labels" not in all_mappings.keys():
47
  all_mappings["labels"] = dict()
48
  for i, label in enumerate(labels[:MAX_LABELS]):
@@ -58,7 +61,9 @@ def write_column_mapping_to_config(dataset_id, dataset_config, dataset_split, *l
58
 
59
  def list_labels_and_features_from_dataset(ds_labels, ds_features, model_id2label):
60
  model_labels = list(model_id2label.values())
61
- lables = [gr.Dropdown(label=f"{label}", choices=model_labels, value=model_id2label[i], interactive=True, visible=True) for i, label in enumerate(ds_labels[:MAX_LABELS])]
 
 
62
  lables += [gr.Dropdown(visible=False) for _ in range(MAX_LABELS - len(lables))]
63
  # TODO: Substitute 'text' with more features for zero-shot
64
  features = [gr.Dropdown(label=f"{feature}", choices=ds_features, value=ds_features[0], interactive=True, visible=True) for feature in ['text']]
@@ -90,7 +95,7 @@ def check_model_and_show_prediction(model_id, dataset_id, dataset_config, datase
90
 
91
  # when dataset does not have labels or features
92
  if not isinstance(ds_labels, list) or not isinstance(ds_features, list):
93
- gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
94
  return (
95
  gr.update(visible=False),
96
  gr.update(visible=False),
@@ -123,29 +128,21 @@ def check_model_and_show_prediction(model_id, dataset_id, dataset_config, datase
123
  *column_mappings
124
  )
125
 
126
- def get_logs_file(uid):
127
- print("read log file")
128
- file = open(f"./tmp/{uid}_log", "r")
129
- contents = file.readlines()
130
- print(contents)
131
- file.close()
132
- return '\n'.join(contents)
133
-
134
  def try_submit(m_id, d_id, config, split, local, uid):
135
  all_mappings = read_column_mapping(CONFIG_PATH)
136
 
137
  if all_mappings is None:
138
  gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
139
- return gr.update(interactive=True)
140
 
141
  if "labels" not in all_mappings.keys():
142
  gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
143
- return gr.update(interactive=True)
144
  label_mapping = all_mappings["labels"]
145
 
146
  if "features" not in all_mappings.keys():
147
  gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
148
- return gr.update(interactive=True)
149
  feature_mapping = all_mappings["features"]
150
 
151
  # TODO: Set column mapping for some dataset such as `amazon_polarity`
@@ -175,7 +172,7 @@ def try_submit(m_id, d_id, config, split, local, uid):
175
 
176
  return (
177
  gr.update(interactive=False),
178
- gr.update(value=get_logs_file(uid),lines=5, visible=True, interactive=False))
179
 
180
  else:
181
  gr.Info("TODO: Submit task to an endpoint")
 
43
  labels = [*labels]
44
  all_mappings = read_column_mapping(CONFIG_PATH)
45
 
46
+ if all_mappings is None:
47
+ all_mappings = dict()
48
+
49
  if "labels" not in all_mappings.keys():
50
  all_mappings["labels"] = dict()
51
  for i, label in enumerate(labels[:MAX_LABELS]):
 
61
 
62
  def list_labels_and_features_from_dataset(ds_labels, ds_features, model_id2label):
63
  model_labels = list(model_id2label.values())
64
+ len_model_labels = len(model_labels)
65
+ print(model_labels, model_id2label, 3%len_model_labels)
66
+ lables = [gr.Dropdown(label=f"{label}", choices=model_labels, value=model_id2label[i%len_model_labels], interactive=True, visible=True) for i, label in enumerate(ds_labels[:MAX_LABELS])]
67
  lables += [gr.Dropdown(visible=False) for _ in range(MAX_LABELS - len(lables))]
68
  # TODO: Substitute 'text' with more features for zero-shot
69
  features = [gr.Dropdown(label=f"{feature}", choices=ds_features, value=ds_features[0], interactive=True, visible=True) for feature in ['text']]
 
95
 
96
  # when dataset does not have labels or features
97
  if not isinstance(ds_labels, list) or not isinstance(ds_features, list):
98
+ # gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
99
  return (
100
  gr.update(visible=False),
101
  gr.update(visible=False),
 
128
  *column_mappings
129
  )
130
 
 
 
 
 
 
 
 
 
131
  def try_submit(m_id, d_id, config, split, local, uid):
132
  all_mappings = read_column_mapping(CONFIG_PATH)
133
 
134
  if all_mappings is None:
135
  gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
136
+ return (gr.update(interactive=True), gr.update(visible=False))
137
 
138
  if "labels" not in all_mappings.keys():
139
  gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
140
+ return (gr.update(interactive=True), gr.update(visible=False))
141
  label_mapping = all_mappings["labels"]
142
 
143
  if "features" not in all_mappings.keys():
144
  gr.Warning(CONFIRM_MAPPING_DETAILS_FAIL_RAW)
145
+ return (gr.update(interactive=True), gr.update(visible=False))
146
  feature_mapping = all_mappings["features"]
147
 
148
  # TODO: Set column mapping for some dataset such as `amazon_polarity`
 
172
 
173
  return (
174
  gr.update(interactive=False),
175
+ gr.update(lines=5, visible=True, interactive=False))
176
 
177
  else:
178
  gr.Info("TODO: Submit task to an endpoint")