import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download

from src.about import (
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.display.utils import (
    QA_BENCHMARK_COLS,
    COLS,
    TYPES,
    AutoEvalColumnQA,
    fields,
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_leaderboard_df
from utils import update_table
from src.benchmarks import DOMAIN_COLS_QA, LANG_COLS_QA, metric_list
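
# Restart this Space via the Hub API; the background scheduler at the
# bottom of the file calls this periodically so fresh results get loaded.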
def restart_space():
    API.restart_space(repo_id=REPO_ID)

# try:
#     print(EVAL_REQUESTS_PATH)
#     snapshot_download(
#         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset",
#         tqdm_class=None, etag_timeout=30, token=TOKEN,
#     )
# except Exception:
#     restart_space()

# try:
#     print(EVAL_RESULTS_PATH)
#     snapshot_download(
#         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset",
#         tqdm_class=None, etag_timeout=30, token=TOKEN,
#     )
# except Exception:
#     restart_space()
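
# Load the raw QA results and build the initial leaderboard dataframe,
# defaulting to the ndcg_at_3 metric.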
raw_data_qa, original_df_qa = get_leaderboard_df(
    EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, QA_BENCHMARK_COLS,
    task='qa', metric='ndcg_at_3',
)
print(f'data loaded: {len(raw_data_qa)}, {original_df_qa.shape}')
leaderboard_df = original_df_qa.copy()
# (
#     finished_eval_queue_df,
#     running_eval_queue_df,
#     pending_eval_queue_df,
# ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
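
# Build the Gradio UI: a QA leaderboard tab with search and filter
# controls, plus an About tab.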
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("QA", elem_id="llm-benchmark-tab-table", id=0):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        search_bar = gr.Textbox(
                            placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
                            show_label=False,
                            elem_id="search-bar",
                        )
                    # select domains
                    with gr.Row():
                        selected_domains = gr.CheckboxGroup(
                            choices=DOMAIN_COLS_QA,
                            value=DOMAIN_COLS_QA,
                            label="Select the domains",
                            elem_id="domain-column-select",
                            interactive=True,
                        )
                    # select languages
                    with gr.Row():
                        selected_langs = gr.CheckboxGroup(
                            choices=LANG_COLS_QA,
                            value=LANG_COLS_QA,
                            label="Select the languages",
                            elem_id="language-column-select",
                            interactive=True,
                        )
                    # select reranking models, deduplicated across the raw results
                    reranking_models = list(frozenset([eval_result.reranking_model for eval_result in raw_data_qa]))
                    with gr.Row():
                        selected_rerankings = gr.CheckboxGroup(
                            choices=reranking_models,
                            value=reranking_models,
                            label="Select the reranking models",
                            elem_id="reranking-select",
                            interactive=True,
                        )
                with gr.Column(min_width=320):
                    selected_metric = gr.Dropdown(
                        choices=metric_list,
                        value=metric_list[0],
                        label="Select the metric",
                        interactive=True,
                        elem_id="metric-select",
                    )
            # TODO: reload leaderboard_df and raw_data_qa when selected_metric
            # changes; no handler is wired to the dropdown yet (see the sketch
            # after the event wiring below).
            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df,
                # headers=shown_columns,
                # datatype=TYPES,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
            )

            # Hidden copy of the unfiltered dataframe; the filter callbacks
            # read from it so the visible table can always be rebuilt, e.g.
            # when the user erases a search query with the backspace key.
            hidden_leaderboard_table_for_search = gr.components.Dataframe(
                value=original_df_qa,
                # headers=COLS,
                # datatype=TYPES,
                visible=False,
            )
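
            # Re-filter the table whenever the search query is submitted or
            # any checkbox selection changes.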
            search_bar.submit(
                update_table,
                [
                    hidden_leaderboard_table_for_search,
                    selected_domains,
                    selected_langs,
                    selected_rerankings,
                    search_bar,
                ],
                leaderboard_table,
            )
            for selector in [
                selected_domains, selected_langs, selected_rerankings,
            ]:
                selector.change(
                    update_table,
                    [
                        hidden_leaderboard_table_for_search,
                        selected_domains,
                        selected_langs,
                        selected_rerankings,
                        search_bar,
                    ],
                    leaderboard_table,
                    queue=True,
                )
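
            # A minimal sketch of the missing metric handler, assuming
            # get_leaderboard_df accepts the dropdown's metric string as its
            # `metric` argument (it does for the initial load above); the
            # `update_metric` helper name is hypothetical:
            #
            # def update_metric(metric):
            #     _, df = get_leaderboard_df(
            #         EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS,
            #         QA_BENCHMARK_COLS, task='qa', metric=metric)
            #     return df
            #
            # selected_metric.change(update_metric, selected_metric, leaderboard_table)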
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
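
# Restart the Space every 30 minutes so newly added results are loaded.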
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
demo.queue(default_concurrency_limit=40).launch()