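"""Gradio app for browsing HERM reward-model benchmark results.

Pulls evaluation results from two Hugging Face dataset repos, renders them as
leaderboard tables, and lets users view random samples from the evaluation set.
A background scheduler restarts the Space every 3 hours.
"""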
import os

import gradio as gr
import numpy as np
from apscheduler.schedulers.background import BackgroundScheduler
from datasets import load_dataset
from huggingface_hub import HfApi, snapshot_download

from src.md import ABOUT_TEXT
from src.utils import load_all_data

api = HfApi()
COLLAB_TOKEN = os.environ.get("COLLAB_TOKEN")

evals_repo = "ai2-adapt-dev/rm-benchmark-results"
prefs_repo = "ai2-adapt-dev/rm-testset-results"
eval_set_repo = "ai2-adapt-dev/rm-benchmark-dev"
repo_dir_herm = "./evals/herm/"
repo_dir_prefs = "./evals/prefs/"

def restart_space():
    api.restart_space(repo_id="ai2-adapt-dev/rm-benchmark-viewer", token=COLLAB_TOKEN)
print("Pulling evaluation results") | |
repo = snapshot_download( | |
local_dir=repo_dir_herm, | |
repo_id=evals_repo, | |
use_auth_token=COLLAB_TOKEN, | |
tqdm_class=None, | |
etag_timeout=30, | |
repo_type="dataset", | |
) | |
repo_pref_sets = snapshot_download( | |
local_dir=repo_dir_prefs, | |
repo_id=prefs_repo, | |
use_auth_token=COLLAB_TOKEN, | |
tqdm_class=None, | |
etag_timeout=30, | |
repo_type="dataset", | |
) | |

def avg_over_herm(dataframe):
    """
    Average over the subsets alpacaeval, mt-bench, llmbar, refusals, and hep, and
    return a dataframe with only the model, average, and per-subset columns.
    """
    new_df = dataframe.copy()
    subsets = ["alpacaeval", "mt-bench", "llmbar", "refusals", "hep"]

    # For each subset, average the columns whose names contain the subset name,
    # then store the result in a new column named after the subset.
    for subset in subsets:
        if subset == "refusals":
            subset_cols = [
                "refusals-dangerous",
                "refusals-offensive",
                "donotanswer",
                "xstest-should-refuse",
                "xstest-should-respond",
            ]
        else:
            subset_cols = [col for col in new_df.columns if subset in col]
        new_df[subset] = np.round(np.nanmean(new_df[subset_cols].values, axis=1), 2)

    keep_columns = ["model", "average"] + subsets
    new_df = new_df[keep_columns]

    # Recompute the overall average from the per-subset averages.
    new_df["average"] = np.round(np.nanmean(new_df[subsets].values, axis=1), 2)

    # Rename column "hep" to "hep (code)" for display.
    new_df = new_df.rename(columns={"hep": "hep (code)"})
    return new_df

def expand_subsets(dataframe):
    # TODO need to modify data/ script to do this
    pass
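
# Load the result tables from the local snapshots and sort each by the overall average score.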
herm_data = load_all_data(repo_dir_herm).sort_values(by='average', ascending=False)
herm_data_avg = avg_over_herm(herm_data).sort_values(by='average', ascending=False)
prefs_data = load_all_data(repo_dir_prefs).sort_values(by='average', ascending=False)
# prefs_data_sub = expand_subsets(prefs_data).sort_values(by='average', ascending=False)

# Column types for the Gradio dataframes: the model column renders as markdown, the rest are numeric scores.
col_types_herm = ["markdown"] + ["number"] * (len(herm_data.columns) - 1)
col_types_herm_avg = ["markdown"] + ["number"] * (len(herm_data_avg.columns) - 1)
col_types_prefs = ["markdown"] + ["number"] * (len(prefs_data.columns) - 1)
# col_types_prefs_sub = ["markdown"] + ["number"] * (len(prefs_data_sub.columns) - 1)

# eval set used for showing random samples
eval_set = load_dataset(eval_set_repo, use_auth_token=COLLAB_TOKEN, split="filtered")

def random_sample(r: gr.Request, subset):
    # `r` is injected by Gradio via the gr.Request annotation; only `subset` comes from the UI.
    if subset is None or subset == []:
        # np.random.randint excludes the upper bound, so use len() to keep the last example reachable
        sample_index = np.random.randint(0, len(eval_set))
        sample = eval_set[sample_index]
    else:  # filter by subsets (can be a list)
        if isinstance(subset, str):
            subset = [subset]
        # filter the dataset down to only the selected subset(s)
        eval_set_filtered = eval_set.filter(lambda x: x["subset"] in subset)
        sample_index = np.random.randint(0, len(eval_set_filtered))
        sample = eval_set_filtered[sample_index]

    markdown_text = '\n\n'.join([f"**{key}**:\n{value}" for key, value in sample.items()])
    return markdown_text


subsets = eval_set.unique("subset")

with gr.Blocks() as app:
    # Tabs for the app: HERM results (overview and detailed), pref-set results,
    # the about text, and a random-sample dataset viewer.
    with gr.Row():
        gr.Markdown("# HERM Results Viewer")
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("HERM - Overview"):
            with gr.Row():
                herm_table = gr.Dataframe(
                    herm_data_avg.values,
                    datatype=col_types_herm_avg,
                    headers=herm_data_avg.columns.tolist(),
                    elem_id="herm_dataframe_avg",
                )
        with gr.TabItem("HERM - Detailed"):
            with gr.Row():
                herm_table = gr.Dataframe(
                    herm_data.values,
                    datatype=col_types_herm,
                    headers=herm_data.columns.tolist(),
                    elem_id="herm_dataframe",
                )
        with gr.TabItem("Pref Sets - Overview"):
            pref_sets_table = gr.Dataframe(
                prefs_data.values,
                datatype=col_types_prefs,
                headers=prefs_data.columns.tolist(),
                elem_id="prefs_dataframe",
            )
        with gr.TabItem("About"):
            with gr.Row():
                gr.Markdown(ABOUT_TEXT)
        with gr.TabItem("Dataset Viewer"):
            with gr.Row():
                # loads one sample at a time
                gr.Markdown("## Random Dataset Sample Viewer")
                subset_selector = gr.Dropdown(subsets, label="Subset", value=None, multiselect=True)
                button = gr.Button("Show Random Sample")
            with gr.Row():
                sample_display = gr.Markdown("{sampled data loads here}")
            button.click(fn=random_sample, inputs=[subset_selector], outputs=[sample_display])

    # Load data when app starts, TODO make this used somewhere...
    # def load_data_on_start():
    #     data_herm = load_all_data(repo_dir_herm)
    #     herm_table.update(data_herm)

    #     data_herm_avg = avg_over_herm(repo_dir_herm)
    #     herm_table.update(data_herm_avg)

    #     data_prefs = load_all_data(repo_dir_prefs)
    #     pref_sets_table.update(data_prefs)

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=10800)  # restarted every 3h
scheduler.start()

app.queue().launch()