add datasets csv
app.py
CHANGED
@@ -12,6 +12,7 @@ from src.display.about import (
     EVALUATION_QUEUE_TEXT,
     INTRODUCTION_TEXT,
     LLM_BENCHMARKS_TEXT,
+    LLM_BENCHMARKS_DETAILS,
     FAQ_TEXT,
     TITLE,
 )
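`LLM_BENCHMARKS_DETAILS` is new in this import list; it is presumably a Markdown string defined in `src/display/about.py` next to the other text constants and rendered below the dataset table later in this diff. A minimal sketch of the assumed shape (the actual text lives in the repo):

```python
# src/display/about.py (sketch) -- assumed shape of the new constant, not the real content.
LLM_BENCHMARKS_DETAILS = """
## Dataset details
Per-dataset descriptions shown in the About tab, below the summary table.
"""
```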
@@ -34,6 +35,7 @@ from src.display.utils import (
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
 from src.populate import get_evaluation_queue_df, get_leaderboard_df
 from src.submission.submit import add_new_eval
+from src.utils import get_dataset_summary_table
 
 
 def restart_space():
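`get_dataset_summary_table` itself is not part of this diff; a minimal sketch of what it plausibly does, assuming it simply loads the summary CSV into a pandas DataFrame (the function name and CSV path come from the diff, the body below is an assumption):

```python
# src/utils.py (sketch) -- hypothetical implementation, not the actual source.
import pandas as pd


def get_dataset_summary_table(file_path: str) -> pd.DataFrame:
    # Load the dataset summary CSV shipped with the Space,
    # e.g. blog/Hallucination-Leaderboard-Summary.csv.
    return pd.read_csv(file_path)
```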
@@ -47,6 +49,7 @@ def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout
     except Exception:
         restart_space()
 
+dataset_df = get_dataset_summary_table(file_path='blog/Hallucination-Leaderboard-Summary.csv')
 
 ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
 ui_snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
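For context, `ui_snapshot_download` (whose signature appears in the hunk header) downloads the queue and results dataset repos and restarts the Space on failure, per the `except Exception: restart_space()` lines above. A plausible reconstruction, assuming it delegates to `huggingface_hub.snapshot_download`:

```python
# Sketch of the helper this hunk touches -- assumed reconstruction, not the file's actual code.
from huggingface_hub import snapshot_download


def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
    try:
        # Mirror a Hub dataset repo (eval requests or results) into a local directory.
        snapshot_download(
            repo_id=repo_id,
            local_dir=local_dir,
            repo_type=repo_type,
            tqdm_class=tqdm_class,
            etag_timeout=etag_timeout,
        )
    except Exception:
        # restart_space() is defined earlier in app.py; restarting lets the download retry cleanly.
        restart_space()
```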
@@ -222,6 +225,17 @@ with demo:
 
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
+            print(f'dataset df columns: {list(dataset_df.columns)}')
+            dataset_table = gr.components.Dataframe(
+                value=dataset_df,
+                headers=list(dataset_df.columns),
+                datatype=['str', 'markdown', 'str', 'str', 'str'],
+                elem_id="dataset-table",
+                interactive=False,
+                visible=True,
+                column_widths=["15%", "20%"]
+            )
+            gr.Markdown(LLM_BENCHMARKS_DETAILS, elem_classes="markdown-text")
             gr.Markdown(FAQ_TEXT, elem_classes="markdown-text")
 
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
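The added block renders the CSV summary as a read-only table at the top of the About tab, between `LLM_BENCHMARKS_TEXT` and the new `LLM_BENCHMARKS_DETAILS`. A self-contained sketch of the same `gr.Dataframe` pattern with illustrative data (the column names and values below are made up, not the leaderboard's actual summary CSV):

```python
# Standalone sketch of the pattern added above (illustrative data only).
import gradio as gr
import pandas as pd

# Stand-in for get_dataset_summary_table(...): five columns to match the
# datatype list in the diff, with the second column rendered as markdown.
dataset_df = pd.DataFrame(
    {
        "Dataset": ["ExampleSum"],
        "Link": ["[dataset card](https://huggingface.co/datasets)"],
        "Size": ["1k docs"],
        "Domain": ["news"],
        "License": ["cc-by-4.0"],
    }
)

with gr.Blocks() as demo:
    with gr.Tab("About"):
        gr.Dataframe(
            value=dataset_df,
            headers=list(dataset_df.columns),
            datatype=["str", "markdown", "str", "str", "str"],
            interactive=False,
            column_widths=["15%", "20%"],
        )
        gr.Markdown("Per-dataset details would follow here.")

if __name__ == "__main__":
    demo.launch()
```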