import json
from collections import defaultdict

import gradio as gr
import pandas as pd
from huggingface_hub import HfFileSystem


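# Hub layout: aggregated results live in the single "open-llm-leaderboard/results"
# dataset repo; per-sample details are stored in one dataset repo per model, named
# after the sanitized model ID (see DETAILS_DATASET_ID below).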
RESULTS_DATASET_ID = "datasets/open-llm-leaderboard/results"
EXCLUDED_KEYS = {
    "pretty_env_info",
    "chat_template",
    "group_subtasks",
}
DETAILS_DATASET_ID = "datasets/open-llm-leaderboard/{model_name_sanitized}-details"
DETAILS_FILENAME = "samples_{subtask}_*.json"

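# TASKS maps each task key to a (display label, results-column prefix) pair;
# gr.Radio renders such pairs as (label, value) choices, so the radio shows
# e.g. "ARC" but yields "leaderboard_arc_challenge".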
TASKS = {
    "leaderboard_arc_challenge": ("ARC", "leaderboard_arc_challenge"),
    "leaderboard_bbh": ("BBH", "leaderboard_bbh"),
    "leaderboard_gpqa": ("GPQA", "leaderboard_gpqa"),
    "leaderboard_ifeval": ("IFEval", "leaderboard_ifeval"),
    "leaderboard_math_hard": ("MATH", "leaderboard_math"),
    "leaderboard_mmlu_pro": ("MMLU-Pro", "leaderboard_mmlu_pro"),
    "leaderboard_musr": ("MuSR", "leaderboard_musr"),
}

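# SUBTASKS lists, for each task's results prefix, the subtask names that appear
# in the details filenames (samples_<subtask>_*.json).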
SUBTASKS = {
    "leaderboard_arc_challenge": ["leaderboard_arc_challenge"],
    "leaderboard_bbh": [
        "leaderboard_bbh_boolean_expressions",
        "leaderboard_bbh_causal_judgement",
        "leaderboard_bbh_date_understanding",
        "leaderboard_bbh_disambiguation_qa",
        "leaderboard_bbh_formal_fallacies",
        "leaderboard_bbh_geometric_shapes",
        "leaderboard_bbh_hyperbaton",
        "leaderboard_bbh_logical_deduction_five_objects",
        "leaderboard_bbh_logical_deduction_seven_objects",
        "leaderboard_bbh_logical_deduction_three_objects",
        "leaderboard_bbh_movie_recommendation",
        "leaderboard_bbh_navigate",
        "leaderboard_bbh_object_counting",
        "leaderboard_bbh_penguins_in_a_table",
        "leaderboard_bbh_reasoning_about_colored_objects",
        "leaderboard_bbh_ruin_names",
        "leaderboard_bbh_salient_translation_error_detection",
        "leaderboard_bbh_snarks",
        "leaderboard_bbh_sports_understanding",
        "leaderboard_bbh_temporal_sequences",
        "leaderboard_bbh_tracking_shuffled_objects_five_objects",
        "leaderboard_bbh_tracking_shuffled_objects_seven_objects",
        "leaderboard_bbh_tracking_shuffled_objects_three_objects",
        "leaderboard_bbh_web_of_lies",
    ],
    "leaderboard_gpqa": [
        "leaderboard_gpqa_extended",
        "leaderboard_gpqa_diamond",
        "leaderboard_gpqa_main",
    ],
    "leaderboard_ifeval": ["leaderboard_ifeval"],
    "leaderboard_math": [
        "leaderboard_math_algebra_hard",
        "leaderboard_math_counting_and_prob_hard",
        "leaderboard_math_geometry_hard",
        "leaderboard_math_intermediate_algebra_hard",
        "leaderboard_math_num_theory_hard",
        "leaderboard_math_prealgebra_hard",
        "leaderboard_math_precalculus_hard",
    ],
    "leaderboard_mmlu_pro": ["leaderboard_mmlu_pro"],
    "leaderboard_musr": [
        "leaderboard_musr_murder_mysteries",
        "leaderboard_musr_object_placements",
        "leaderboard_musr_team_allocation",
    ],
}


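# fsspec-compatible filesystem for the Hugging Face Hub: lets us glob and stream
# files from dataset repos without downloading them first.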
fs = HfFileSystem()


def fetch_result_paths():
    return fs.glob(f"{RESULTS_DATASET_ID}/**/**/*.json")


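# Result filenames embed an ISO timestamp, so the lexicographic max() of a model's
# paths is its latest results file. Hypothetical example:
#   filter_latest_result_path_per_model([
#       ".../results/org/model/results_2024-06-01T00-00-00.json",
#       ".../results/org/model/results_2024-06-26T12-00-00.json",
#   ])
#   -> {"org/model": ".../results/org/model/results_2024-06-26T12-00-00.json"}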
def filter_latest_result_path_per_model(paths):
    d = defaultdict(list)
    for path in paths:
        model_id, _ = path[len(RESULTS_DATASET_ID) + 1:].rsplit("/", 1)
        d[model_id].append(path)
    return {model_id: max(paths) for model_id, paths in d.items()}


def get_result_path_from_model(model_id, result_path_per_model):
    return result_path_per_model[model_id]


def update_load_results_component():
    return gr.Button("Load Results", interactive=True)


def load_data(result_path) -> dict:
    # Results files are single JSON documents (the details files below are JSON Lines).
    with fs.open(result_path, "r") as f:
        data = json.load(f)
    return data


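# Flatten the nested results JSON into a single-row dataframe, dropping the bulky
# keys in EXCLUDED_KEYS (environment info, chat template, subtask groupings), and
# store the model name in an "index" column for later alignment.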
def load_results_dataframe(model_id):
    if not model_id:
        return
    result_path = get_result_path_from_model(model_id, latest_result_path_per_model)
    data = load_data(result_path)
    model_name = data.get("model_name", "Model")
    df = pd.json_normalize([{key: value for key, value in data.items() if key not in EXCLUDED_KEYS}])
    return df.set_index(pd.Index([model_name])).reset_index()


def load_results_dataframes(*model_ids):
    return [load_results_dataframe(model_id) for model_id in model_ids]


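# Each per-model dataframe carries its model name in the "index" column;
# concatenating and transposing yields one column per model. The same transposed
# frame is rendered twice: metrics for the Results tab, run configuration for Configs.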
def display_results(task, *dfs):
    dfs = [df.set_index("index") for df in dfs if "index" in df.columns]
    if not dfs:
        return None, None
    df = pd.concat(dfs)
    df = df.T.rename_axis(columns=None)
    return display_tab("results", df, task), display_tab("configs", df, task)


def display_tab(tab, df, task):
    df = df.style.format(na_rep="")
    # Hide rows that belong to the other tab, aggregate "leaderboard." rows, alias
    # rows, and (unless "All" is selected) rows outside the chosen task.
    df.hide(
        [
            row
            for row in df.index
            if (
                not row.startswith(f"{tab}.")
                or row.startswith(f"{tab}.leaderboard.")
                or row.endswith(".alias")
                or (not row.startswith(f"{tab}.{task}") if task != "All" else False)
            )
        ],
        axis="index",
    )
    # Strip the common prefix (plus the separator after the task name) and the
    # ",none" filter suffix from the displayed row names.
    start = len(f"{tab}.leaderboard_") if task == "All" else len(f"{tab}.{task}") + 1
    df.format_index(lambda idx: idx[start:].removesuffix(",none"), axis="index")
    return df.to_html()


def update_tasks_component():
    return gr.Radio(
        ["All"] + list(TASKS.values()),
        label="Tasks",
        info="Evaluation tasks to be displayed",
        value="All",
        interactive=True,
    )


def clear_results():
    return (
        None, None, None, None,
        gr.Radio(
            ["All"] + list(TASKS.values()),
            label="Tasks",
            info="Evaluation tasks to be displayed",
            value="All",
            interactive=False,
        ),
    )


def update_subtasks_component(task):
    return gr.Radio(
        SUBTASKS.get(task),
        info="Evaluation subtasks to be displayed",
        value=None,
    )


def update_load_details_component(model_id_1, model_id_2, subtask):
    return gr.Button("Load Details", interactive=bool((model_id_1 or model_id_2) and subtask))


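# Details live in a per-model dataset repo whose name replaces "/" in the model ID
# with "__"; each subtask has timestamped JSON-Lines files of per-sample records,
# and max() again selects the latest one.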
def load_details_dataframe(model_id, subtask):
    if not model_id or not subtask:
        return
    model_name_sanitized = model_id.replace("/", "__")
    paths = fs.glob(
        f"{DETAILS_DATASET_ID}/**/{DETAILS_FILENAME}".format(
            model_name_sanitized=model_name_sanitized, subtask=subtask
        )
    )
    if not paths:
        return
    path = max(paths)
    with fs.open(path, "r") as f:
        data = [json.loads(line) for line in f]
    df = pd.json_normalize(data)
    df["model_name"] = model_id
    return df


def load_details_dataframes(subtask, *model_ids):
    return [load_details_dataframe(model_id, subtask) for model_id in model_ids]


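# Pick the requested sample from each model's details dataframe and show the two
# records side by side, one column per model.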
def display_details(sample_idx, *dfs):
    rows = [df.iloc[sample_idx] for df in dfs if "model_name" in df.columns and sample_idx < len(df)]
    if not rows:
        return
    df = pd.concat([row.rename(row.pop("model_name")) for row in rows], axis="columns")
    return df.style.format(na_rep="").to_html()


def update_sample_idx_component(*dfs):
    maximum = max(len(df) - 1 for df in dfs)
    return gr.Number(
        label="Sample Index",
        info="Index of the sample to be displayed",
        value=0,
        minimum=0,
        maximum=maximum,
        visible=True,
    )


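# Build the model list once at startup: a mapping of model ID to its latest results file.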
latest_result_path_per_model = filter_latest_result_path_per_model(fetch_result_paths())


with gr.Blocks(fill_height=True) as demo:
    gr.HTML("<h1 style='text-align: center;'>Compare Results of the 🤗 Open LLM Leaderboard</h1>")
    gr.HTML("<h3 style='text-align: center;'>Select 2 models to load and compare their results</h3>")

    with gr.Row():
        with gr.Column():
            model_id_1 = gr.Dropdown(choices=list(latest_result_path_per_model.keys()), label="Models")
            dataframe_1 = gr.Dataframe(visible=False)
        with gr.Column():
            model_id_2 = gr.Dropdown(choices=list(latest_result_path_per_model.keys()), label="Models")
            dataframe_2 = gr.Dataframe(visible=False)

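    # Two top-level tabs: "Results" (with nested Results/Configs views of the
    # aggregated scores) and "Details" (per-sample comparison).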
    with gr.Row():
        with gr.Tab("Results"):
            task = gr.Radio(
                ["All"] + list(TASKS.values()),
                label="Tasks",
                info="Evaluation tasks to be displayed",
                value="All",
                interactive=False,
            )
            load_results_btn = gr.Button("Load Results", interactive=False)
            clear_results_btn = gr.Button("Clear Results")
            with gr.Tab("Results"):
                results = gr.HTML()
            with gr.Tab("Configs"):
                configs = gr.HTML()
        with gr.Tab("Details"):
            details_task = gr.Radio(
                ["All"] + list(TASKS.values()),
                label="Tasks",
                info="Evaluation tasks to be displayed",
                value="All",
                interactive=True,
            )
            subtask = gr.Radio(
                SUBTASKS.get(details_task.value),
                label="Subtasks",
                info="Evaluation subtasks to be displayed (choose one of the Tasks above)",
            )
            load_details_btn = gr.Button("Load Details", interactive=False)
            sample_idx = gr.Number(
                label="Sample Index",
                info="Index of the sample to be displayed",
                value=0,
                minimum=0,
                visible=False,
            )
            details = gr.HTML()
            details_dataframe_1 = gr.Dataframe(visible=False)
            details_dataframe_2 = gr.Dataframe(visible=False)
            details_dataframe = gr.DataFrame(visible=False)

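    # Event wiring: loading results fills the two hidden dataframes, whose change
    # events (re)render the Results/Configs HTML.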
    # Selecting either model enables the "Load Results" button.
    gr.on(
        triggers=[model_id_1.change, model_id_2.change],
        fn=update_load_results_component,
        outputs=load_results_btn,
    )
    load_results_btn.click(
        fn=load_results_dataframes,
        inputs=[model_id_1, model_id_2],
        outputs=[dataframe_1, dataframe_2],
    ).then(
        fn=update_tasks_component,
        outputs=task,
    )
    gr.on(
        triggers=[dataframe_1.change, dataframe_2.change, task.change],
        fn=display_results,
        inputs=[task, dataframe_1, dataframe_2],
        outputs=[results, configs],
    )
    clear_results_btn.click(
        fn=clear_results,
        outputs=[model_id_1, model_id_2, dataframe_1, dataframe_2, task],
    )

    details_task.change(
        fn=update_subtasks_component,
        inputs=details_task,
        outputs=subtask,
    )
    gr.on(
        triggers=[model_id_1.change, model_id_2.change, subtask.change, details_task.change],
        fn=update_load_details_component,
        inputs=[model_id_1, model_id_2, subtask],
        outputs=load_details_btn,
    )
    load_details_btn.click(
        fn=load_details_dataframes,
        inputs=[subtask, model_id_1, model_id_2],
        outputs=[details_dataframe_1, details_dataframe_2],
    ).then(
        fn=display_details,
        inputs=[sample_idx, details_dataframe_1, details_dataframe_2],
        outputs=details,
    ).then(
        fn=update_sample_idx_component,
        inputs=[details_dataframe_1, details_dataframe_2],
        outputs=sample_idx,
    )
    sample_idx.change(
        fn=display_details,
        inputs=[sample_idx, details_dataframe_1, details_dataframe_2],
        outputs=details,
    )


demo.launch()