inoki-giskard committed
Merge commit 'refs/pr/27' of hf.co:spaces/giskardai/giskard-evaluator into giskard-main
Files changed:
- app.py +1 -1
- app_debug.py +16 -14
- app_text_classification.py +2 -2
- io_utils.py +1 -1
- text_classification_ui_helpers.py +2 -1
- wordings.py +5 -0
app.py
CHANGED
@@ -18,7 +18,7 @@ try:
 
     start_process_run_job()
 
-    demo.queue(max_size=
+    demo.queue(max_size=1000)
     demo.launch(share=False)
     atexit.register(stop_thread)
 
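For context on the app.py change: demo.queue(max_size=1000) caps how many pending requests Gradio will hold before turning new ones away. A minimal sketch of the same pattern in an unrelated Blocks app (the greeter function and component names are illustrative, not taken from this Space):

import gradio as gr

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    greeting = gr.Textbox(label="Greeting")
    # .submit() fires when the user presses Enter in the textbox
    name.submit(lambda n: f"Hello, {n}!", name, greeting)

# Up to 1000 requests may wait in the queue; later submissions are rejected
# until slots free up. share=False keeps the app local-only.
demo.queue(max_size=1000)
demo.launch(share=False)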
app_debug.py
CHANGED
@@ -2,21 +2,21 @@ from os import listdir
 from os.path import isfile, join
 
 import gradio as gr
-
 import pipe
+from io_utils import get_logs_file
 
 LOG_PATH = "./tmp"
-CONFIG_PATH = "./cicd/configs"
+CONFIG_PATH = "./cicd/configs/"
+MAX_FILES_NUM = 20
 
 
 def get_accordions_of_files(path, files):
-    components = []
-    for
-
-
-
-
-            gr.Markdown(f.read())
+    components = [None for _ in range(0, MAX_FILES_NUM)]
+    for i in range(0, len(files)):
+        if i >= MAX_FILES_NUM:
+            break
+        with open(join(path, files[i]), "r") as f:
+            components[i] = f.read()
     return components
 
 
@@ -35,6 +35,9 @@ def get_accordions_of_config_files():
     ]
     return get_accordions_of_files(CONFIG_PATH, config_files)
 
+def get_config_files():
+    config_files = [join(CONFIG_PATH, f) for f in listdir(CONFIG_PATH) if isfile(join(CONFIG_PATH, f)) and f.endswith(".yaml")]
+    return config_files
 
 def get_demo(demo):
     with gr.Row():
@@ -43,9 +46,8 @@ def get_demo(demo):
         gr.Markdown(f"Current job: {pipe.current} Jobs in queue: {len(pipe.jobs)}")
     else:
         gr.Markdown("No jobs in queue, please submit an evaluation task.")
-    with gr.Accordion(label="Config Files", open=False):
-        config_accordion = get_accordions_of_config_files()
-        demo.load(get_accordions_of_config_files, outputs=config_accordion, every=1)
     with gr.Accordion(label="Log Files", open=False):
-
-        demo.load(
+        logs = gr.Textbox(lines=10, visible=True, label="Log File")
+        demo.load(get_logs_file, None, logs, every=0.5)
+    with gr.Accordion(label="Config Files", open=False):
+        gr.Files(value=get_config_files, label="Config Files", every=10)
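The app_debug.py changes switch the debug tab to a polling pattern: demo.load(fn, inputs, outputs, every=N) re-runs fn every N seconds while a session is open, and a component whose value is a callable (as with gr.Files(value=get_config_files, every=10)) is refreshed on the same schedule. A small self-contained sketch of the log-polling half, with an assumed log path and a made-up fallback message:

import gradio as gr

LOG_FILE = "./tmp/temp_log"  # assumed path, mirroring the diff

def read_log():
    # Zero-argument callback: demo.load() below passes no inputs
    try:
        with open(LOG_FILE, "r") as f:
            return f.read()
    except FileNotFoundError:
        return "(no log yet)"

with gr.Blocks() as demo:
    with gr.Accordion(label="Log Files", open=False):
        logs = gr.Textbox(lines=10, label="Log File")
    # Re-read the file every 0.5 s; `every=` requires the queue to be enabled
    demo.load(read_log, None, logs, every=0.5)

demo.queue().launch()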
app_text_classification.py
CHANGED
@@ -42,7 +42,7 @@ def get_demo(demo):
     dataset_split_input = gr.Dropdown(label="Dataset Split", visible=False)
 
     with gr.Row():
-        example_input = gr.
+        example_input = gr.HTML(visible=False)
     with gr.Row():
         example_prediction = gr.Label(label="Model Prediction Sample", visible=False)
 
@@ -103,7 +103,7 @@ def get_demo(demo):
 
     with gr.Row():
         logs = gr.Textbox(label="Giskard Bot Evaluation Log:", visible=False)
-        demo.load(get_logs_file,
+        demo.load(get_logs_file, None, logs, every=0.5)
 
     dataset_id_input.change(
         check_dataset_and_get_config,
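The example_input component is now a gr.HTML placeholder that starts hidden and is later filled in and revealed by a callback via gr.update (see text_classification_ui_helpers.py below). A hedged sketch of that reveal pattern, with illustrative names rather than the Space's real callbacks:

import gradio as gr

def show_sample(text):
    # One return value can change both the value and the visibility of a component
    return gr.update(value=f"<b>Sample input:</b> {text}", visible=True)

with gr.Blocks() as demo:
    user_text = gr.Textbox(label="Type a sample input")
    example_input = gr.HTML(visible=False)  # hidden until there is something to show
    user_text.submit(show_sample, user_text, example_input)

demo.launch()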
io_utils.py
CHANGED
@@ -111,7 +111,7 @@ def convert_column_mapping_to_json(df, label=""):
     return column_mapping
 
 
-def get_logs_file(
+def get_logs_file():
    try:
        file = open(f"./tmp/temp_log", "r")
        return file.read()
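The signature change matters because demo.load(get_logs_file, None, logs, every=0.5) passes no inputs, so the callback must accept zero arguments. The diff truncates the rest of the function; a plausible self-contained version (the except branch is an assumption, not the Space's actual code) would look like:

def get_logs_file():
    try:
        with open("./tmp/temp_log", "r") as file:
            return file.read()
    except Exception:
        return ""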
text_classification_ui_helpers.py
CHANGED
@@ -7,6 +7,7 @@ import threading
 import datasets
 import gradio as gr
 from transformers.pipelines import TextClassificationPipeline
+from wordings import get_styled_input
 
 from io_utils import (get_yaml_path, read_column_mapping, save_job_to_pipe,
                       write_column_mapping, write_inference_type,
@@ -181,7 +182,7 @@ def check_model_and_show_prediction(
         ppl, dataset_id, dataset_config, dataset_split
     )
     return (
-        gr.update(value=prediction_input, visible=True),
+        gr.update(value=get_styled_input(prediction_input), visible=True),
         gr.update(value=prediction_output, visible=True),
         gr.update(visible=True, open=False),
         *column_mappings,
wordings.py
CHANGED
@@ -37,3 +37,8 @@ MAPPING_STYLED_ERROR_WARNING = """
     Sorry, we cannot auto-align the labels/features of your dataset and model. Please double check.
 </h3>
 """
+
+def get_styled_input(input):
+    return f"""<h3 style="text-align: center;color: #5ec26a; background-color: #e2fbe8; border-radius: 8px; padding: 10px; ">
+    Sample input: {input}
+    </h3>"""