# LiveBench / app.py
import gradio as gr
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import snapshot_download
from gradio.components.textbox import Textbox
from gradio.components.dataframe import Dataframe
from gradio.components.checkboxgroup import CheckboxGroup
# from fastchat.serve.monitor.monitor import build_leaderboard_tab, build_basic_stats_tab, basic_component_values, leader_component_values
from src.about import (
CITATION_BUTTON_LABEL,
CITATION_BUTTON_TEXT,
EVALUATION_QUEUE_TEXT,
INTRODUCTION_TEXT,
LLM_BENCHMARKS_TEXT,
TITLE,
LINKS,
)
from src.display.css_html_js import (
custom_css,
CSS_EXTERNAL,
JS_EXTERNAL,
)
from src.display.utils import (
AutoEvalColumn,
fields,
)
from src.envs import (
API,
EVAL_DETAILED_RESULTS_PATH,
EVAL_RESULTS_PATH,
EVAL_DETAILED_RESULTS_REPO,
REPO_ID,
RESULTS_REPO,
TOKEN,
)
from src.populate import get_leaderboard_df
def restart_space():
    """Restart this Hugging Face Space via the Hub API.

    Used both as a recovery path when the dataset downloads below fail and
    as a periodic job registered with the scheduler at the bottom of the file.
    """
    API.restart_space(repo_id=REPO_ID)
### Space initialisation
# Download the detailed per-model results dataset into the local cache.
# On any failure (auth, network, timeout) the Space restarts and retries
# from scratch rather than serving a partially-initialised app.
try:
    print(EVAL_DETAILED_RESULTS_REPO)  # log which repo is being fetched
    snapshot_download(
        repo_id=EVAL_DETAILED_RESULTS_REPO,
        local_dir=EVAL_DETAILED_RESULTS_PATH,
        repo_type="dataset",
        tqdm_class=None,  # suppress per-file progress bars in Space logs
        etag_timeout=30,
        token=TOKEN,
    )
except Exception:
    restart_space()
# Download the aggregate results dataset, then build the leaderboard frames.
try:
    # NOTE(review): this logs the local path, whereas the block above logs
    # the repo id — presumably unintentional asymmetry; confirm before changing.
    print(EVAL_RESULTS_PATH)
    snapshot_download(
        repo_id=RESULTS_REPO,
        local_dir=EVAL_RESULTS_PATH,
        repo_type="dataset",
        tqdm_class=None,  # suppress per-file progress bars in Space logs
        etag_timeout=30,
        token=TOKEN,
    )
except Exception:
    restart_space()
# Mapping of subset name -> pandas DataFrame (consumed by init_leaderboard,
# which iterates its .keys() and feeds values to gr.Dataframe).
LEADERBOARD_DF = get_leaderboard_df(RESULTS_REPO)
def GET_DEFAULT_TEXTBOX():
    """Create a fresh, empty model-name filter textbox.

    A new component instance is returned on every call so the Refresh
    handler can reset the search box to its default state.
    """
    hint = "πŸ” Search Models... [press enter]"
    return gr.Textbox("", placeholder=hint, label="Filter Models by Name")
def GET_DEFAULT_CHECKBOX():
    """Create a fresh column-selection checkbox group.

    Choices are all non-hidden AutoEvalColumn fields; the initially checked
    values are the fields flagged displayed_by_default. A new instance is
    returned per call so the Refresh handler can reset the picker.
    """
    # Removed a leftover debug print of the choices list.
    visible_columns = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
    return gr.CheckboxGroup(
        choices=visible_columns,
        label="Select Columns to Display",
        value=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
    )
def init_leaderboard(dataframes):
    """Build the interactive leaderboard tab.

    Args:
        dataframes: mapping of subset name -> pandas DataFrame of results.
            The dict is updated in place by the Refresh button so that the
            event handlers' closures observe freshly fetched data.
    """
    subsets = list(dataframes.keys())
    with gr.Row():
        # Default to the last subset, matching the table rendered below.
        selected_subset = gr.Dropdown(choices=subsets, label="Select Dataset Subset", value=subsets[-1])
        search_textbox = GET_DEFAULT_TEXTBOX()
        selected_columns = GET_DEFAULT_CHECKBOX()
    data = dataframes[subsets[-1]]
    with gr.Row():
        datatype = [c.type for c in fields(AutoEvalColumn)]
        df = gr.Dataframe(data, datatype=datatype, type="pandas")

    def update_data(subset, search_term, selected_columns):
        """Return a DataFrame component filtered, sorted, and projected per the controls."""
        filtered_data = dataframes[subset]
        if search_term:
            filtered_data = filtered_data[filtered_data["Model Name"].str.contains(search_term, case=False)]
        # Sort on a copy. The original used inplace=True, which mutated the
        # shared cached frame whenever no search term was set (and raised
        # SettingWithCopyWarning on filtered views).
        filtered_data = filtered_data.sort_values(by="Total", ascending=False)
        # Intersect with AutoEvalColumn to preserve the canonical column order.
        ordered_columns = [c.name for c in fields(AutoEvalColumn) if c.name in selected_columns]
        return gr.DataFrame(
            filtered_data[ordered_columns],
            type="pandas",
            datatype=[c.type for c in fields(AutoEvalColumn) if c.name in ordered_columns],
        )

    def refresh(subset):
        """Re-fetch results, then reset the search box and column picker."""
        global LEADERBOARD_DF
        LEADERBOARD_DF = get_leaderboard_df(RESULTS_REPO)
        # Bug fix: the original only rebound the global, so the `dataframes`
        # closure kept serving stale data. Refresh the dict in place instead.
        # NOTE(review): the subset dropdown choices are not rebuilt here; if
        # get_leaderboard_df can return new subsets, that needs a UI update too.
        dataframes.clear()
        dataframes.update(LEADERBOARD_DF)
        default_columns = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default]
        return update_data(subset, None, default_columns), GET_DEFAULT_TEXTBOX(), GET_DEFAULT_CHECKBOX()

    with gr.Row():
        refresh_button = gr.Button("Refresh")
        refresh_button.click(
            refresh,
            inputs=[
                selected_subset,
            ],
            outputs=[df, search_textbox, selected_columns],
            concurrency_limit=20,
        )
    # Any control change re-renders the table through the same update path.
    selected_subset.change(update_data, inputs=[selected_subset, search_textbox, selected_columns], outputs=df)
    search_textbox.submit(update_data, inputs=[selected_subset, search_textbox, selected_columns], outputs=df)
    selected_columns.change(update_data, inputs=[selected_subset, search_textbox, selected_columns], outputs=df)
def init_detailed_results():
    """Embed the LiveBench detailed-results dataset viewer in an iframe."""
    with gr.Row():
        # Raw HTML iframe pointing at the Hub's hosted dataset viewer; the
        # URL is user-facing content and must not be altered.
        gr.HTML("""\
<iframe
    src="https://huggingface.co/datasets/lmms-lab/LiveBenchDetailedResults/embed/viewer/"
    frameborder="0"
    width="100%"
    height="800px"
></iframe>
""")
# Extra markup injected into the page <head>: every external stylesheet as a
# <link> tag, followed by every external script as a <script> tag.
_css_tags = [f'<link rel="stylesheet" href="{css}">' for css in CSS_EXTERNAL]
_js_tags = [f'<script src="{js}" crossorigin="anonymous"></script>' for js in JS_EXTERNAL]
HEAD = "".join(_css_tags + _js_tags)
# Top-level Gradio app: intro text plus two tabs (leaderboard, detailed results).
demo = gr.Blocks(css=custom_css, head = HEAD)
with demo:
    gr.HTML(TITLE)
    gr.HTML(LINKS)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("πŸ… LiveBench Results", elem_id="llm-benchmark-tab-table", id=0):
            init_leaderboard(LEADERBOARD_DF)
        with gr.TabItem("πŸ“ Detailed Results", elem_id="llm-benchmark-tab-table", id=2):
            init_detailed_results()
    # Citation accordion is currently disabled; kept for future re-enablement.
    # with gr.Row():
    #     with gr.Accordion("πŸ“™ Citation", open=False):
    #         citation_button = gr.Textbox(
    #             value=CITATION_BUTTON_TEXT,
    #             label=CITATION_BUTTON_LABEL,
    #             lines=20,
    #             elem_id="citation-button",
    #             show_copy_button=True,
    #         )
# Restart the Space every 30 minutes — NOTE(review): presumably to re-run the
# snapshot downloads above and pick up newly published results; confirm intent.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()
# Enable request queuing and start the Gradio server.
demo.queue(default_concurrency_limit=40).launch()