import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns, SearchColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
TITLE,
COMING_SOON_TEXT
)
from src.display.css_html_js import custom_css
from src.display.utils import (
BENCHMARK_COLS,
COLS,
EVAL_COLS,
EVAL_TYPES,
AutoEvalColumn,
ModelType,
fields,
WeightType,
Precision
)
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df, get_model_leaderboard_df
from src.submission.submit import add_new_eval


def restart_space():
    """Restart the Space via the Hub API; used to recover from failed dataset downloads."""
    API.restart_space(repo_id=REPO_ID)


### Space initialisation
# Pull the evaluation queue and results datasets from the Hub; if either
# download fails, restart the Space so initialisation is retried cleanly.
try:
    print(EVAL_REQUESTS_PATH)
    snapshot_download(
        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()

try:
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
    )
except Exception:
    restart_space()
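

# Editorial sketch: instead of restarting the whole Space on a failed
# download, a bounded retry could be used. This is a hypothetical helper,
# not wired into the app; it reuses the same snapshot_download arguments
# as the initialisation block above.
def download_with_retry(repo_id: str, local_dir: str, attempts: int = 3) -> None:
    """Try snapshot_download a few times before falling back to a Space restart."""
    for attempt in range(attempts):
        try:
            snapshot_download(
                repo_id=repo_id, local_dir=local_dir, repo_type="dataset",
                tqdm_class=None, etag_timeout=30, token=TOKEN,
            )
            return
        except Exception:
            if attempt == attempts - 1:
                restart_space()
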
LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
(
finished_eval_queue_df,
running_eval_queue_df,
pending_eval_queue_df,
) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
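
# The three DataFrames above split the evaluation queue pulled from QUEUE_REPO
# into finished, running, and pending requests; they back the accordions in
# the (currently disabled) submission tab further down.
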
def init_leaderboard(dataframe):
    """Build the full-featured leaderboard view (currently unused; see overall_leaderboard)."""
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
return Leaderboard(
value=dataframe,
datatype=[c.type for c in fields(AutoEvalColumn)],
select_columns=None,
# SelectColumns(
# default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
# cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
# label="Select Columns to Display:",
# ),
# search_columns=None,
# search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
        search_columns=SearchColumns(primary_column=AutoEvalColumn.model.name, secondary_columns=[],
                                     placeholder="Search by model name",
                                     label="Search"),
hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
filter_columns=None,
# [
# ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
# ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
# ColumnFilter(
# AutoEvalColumn.params.name,
# type="slider",
# min=0.01,
# max=150,
# label="Select the number of parameters (B)",
# ),
# ColumnFilter(
# AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
# ),
# ],
# bool_checkboxgroup_label="Hide models",
interactive=False,
)


# Overall model ranking, read from a static results snapshot shipped with the repo.
model_result_path = "./src/results/models_2024-10-07-14:50:12.666068.jsonl"
model_leaderboard_df = get_model_leaderboard_df(model_result_path)
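
# Editorial sketch: the loader above assumes the snapshot file exists. A
# defensive variant (hypothetical helper, not used by the app) could fall
# back to an empty frame instead of raising:
def load_model_results(path: str) -> pd.DataFrame:
    """Return the model leaderboard, or an empty DataFrame if the snapshot is missing."""
    from pathlib import Path
    if not Path(path).is_file():
        return pd.DataFrame()
    return get_model_leaderboard_df(path)
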

def overall_leaderboard(dataframe):
    """Build the read-only leaderboard view used by every tab."""
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")
return Leaderboard(
value=dataframe,
datatype=[c.type for c in fields(AutoEvalColumn)],
select_columns=None,
        search_columns=SearchColumns(primary_column=AutoEvalColumn.model.name, secondary_columns=[],
                                     placeholder="Search by model name",
                                     label="Search"),
hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
filter_columns=None,
interactive=False,
)
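
# Editorial note: every leaderboard tab below currently renders the same
# overall snapshot, so the category tabs are placeholders. A minimal sketch
# of per-domain slicing, assuming the snapshot later gains a (hypothetical)
# "domain" column:
def domain_leaderboard_df(df: pd.DataFrame, domain: str) -> pd.DataFrame:
    """Return the rows for one evaluation domain; assumes a 'domain' column exists."""
    return df[df["domain"] == domain].reset_index(drop=True)
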
demo = gr.Blocks(css=custom_css)
with demo:
gr.HTML(TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
with gr.Tabs(elem_classes="tab-buttons") as tabs:
with gr.TabItem("๐Ÿ… Overview", elem_id="llm-benchmark-tab-table", id=0):
# leaderboard = init_leaderboard(LEADERBOARD_DF)
leaderboard = overall_leaderboard(model_leaderboard_df)
with gr.TabItem("๐ŸŽฏ Overall", elem_id="llm-benchmark-tab-table", id=1):
leaderboard = overall_leaderboard(model_leaderboard_df)
with gr.TabItem("๐Ÿ”ข Math", elem_id="math-tab-table", id=2):
# leaderboard = init_leaderboard(LEADERBOARD_DF)
with gr.TabItem("๐Ÿงฎ Algebra", elem_id="algebra_subtab", id=0, elem_classes="subtab"):
leaderboard = overall_leaderboard(model_leaderboard_df)
with gr.TabItem("๐Ÿ“ Geometry", elem_id="geometry_subtab", id=1, elem_classes="subtab"):
leaderboard = overall_leaderboard(model_leaderboard_df)
with gr.TabItem("๐Ÿ“Š Probability", elem_id="prob_subtab", id=2, elem_classes="subtab"):
leaderboard = overall_leaderboard(model_leaderboard_df)
with gr.TabItem("๐Ÿง  Reasoning", elem_id="reasonong-tab-table", id=3):
with gr.TabItem("๐Ÿงฉ Logical", elem_id="logical_subtab", id=0, elem_classes="subtab"):
leaderboard = overall_leaderboard(model_leaderboard_df)
with gr.TabItem("๐Ÿ—ฃ๏ธ Social", elem_id="social_subtab", id=1, elem_classes="subtab"):
leaderboard = overall_leaderboard(model_leaderboard_df)
with gr.TabItem("</> Coding", elem_id="coding-tab-table", id=4):
gr.Markdown(COMING_SOON_TEXT, elem_classes="markdown-text")
with gr.TabItem("๐Ÿ”ฌ Science", elem_id="science-table", id=5):
gr.Markdown(COMING_SOON_TEXT, elem_classes="markdown-text")
with gr.TabItem("๐Ÿ“ About", elem_id="llm-benchmark-tab-table", id=6):
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
        # Submission tab, kept for later but disabled via the string literal below.
        '''
with gr.TabItem("๐Ÿš€ Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
with gr.Column():
with gr.Row():
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
with gr.Column():
with gr.Accordion(
f"โœ… Finished Evaluations ({len(finished_eval_queue_df)})",
open=False,
):
with gr.Row():
finished_eval_table = gr.components.Dataframe(
value=finished_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"๐Ÿ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
open=False,
):
with gr.Row():
running_eval_table = gr.components.Dataframe(
value=running_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Accordion(
f"โณ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
open=False,
):
with gr.Row():
pending_eval_table = gr.components.Dataframe(
value=pending_eval_queue_df,
headers=EVAL_COLS,
datatype=EVAL_TYPES,
row_count=5,
)
with gr.Row():
gr.Markdown("# โœ‰๏ธโœจ Submit your model here!", elem_classes="markdown-text")
with gr.Row():
with gr.Column():
model_name_textbox = gr.Textbox(label="Model name")
revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
model_type = gr.Dropdown(
choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
label="Model type",
multiselect=False,
value=None,
interactive=True,
)
with gr.Column():
precision = gr.Dropdown(
choices=[i.value.name for i in Precision if i != Precision.Unknown],
label="Precision",
multiselect=False,
value="float16",
interactive=True,
)
weight_type = gr.Dropdown(
choices=[i.value.name for i in WeightType],
label="Weights type",
multiselect=False,
value="Original",
interactive=True,
)
base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
submit_button = gr.Button("Submit Eval")
submission_result = gr.Markdown()
submit_button.click(
add_new_eval,
[
model_name_textbox,
base_model_name_textbox,
revision_name_textbox,
precision,
weight_type,
model_type,
],
submission_result,
)
'''
with gr.Row():
with gr.Accordion("๐Ÿ“™ Citation", open=False):
citation_button = gr.Textbox(
value=CITATION_BUTTON_TEXT,
label=CITATION_BUTTON_LABEL,
lines=20,
elem_id="citation-button",
show_copy_button=True,
)

# Restart the Space every 30 minutes so freshly pushed results are picked up.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

demo.queue(default_concurrency_limit=40).launch()