import gradio as gr
import os
from huggingface_hub import HfApi, snapshot_download
from apscheduler.schedulers.background import BackgroundScheduler
from datasets import load_dataset
from src.utils import load_all_data
from src.md import ABOUT_TEXT, TOP_TEXT
from src.plt import plot_avg_correlation
from src.constants import subset_mapping, length_categories, example_counts
from src.css import custom_css
import numpy as np

api = HfApi()

COLLAB_TOKEN = os.environ.get("COLLAB_TOKEN")
evals_repo = "allenai/reward-bench-results"

eval_set_repo = "allenai/reward-bench"
repo_dir_rewardbench = "./evals/rewardbench/"


def restart_space():
    api.restart_space(repo_id="allenai/reward-bench", token=COLLAB_TOKEN)


print("Pulling evaluation results")
repo = snapshot_download(
    local_dir=repo_dir_rewardbench,
    ignore_patterns=["pref-sets-scores/*", "eval-set-scores/*"],
    repo_id=evals_repo,
    use_auth_token=COLLAB_TOKEN,
    tqdm_class=None,
    etag_timeout=30,
    repo_type="dataset",
)
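# Note (assumption based on the load_all_data calls below): after this download,
# repo_dir_rewardbench should contain per-model results under eval-set/ and pref-sets/.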


def avg_over_rewardbench(dataframe_core, dataframe_prefs):
    """
    Averages over the alpacaeval, mt-bench, llmbar, refusals, and hep subsets and returns a dataframe with only the section columns.

    We average over 4 core sections (per-prompt weighting):
    1. Chat: the easy chat subsets (alpacaeval-easy, alpacaeval-length, alpacaeval-hard, mt-bench-easy, mt-bench-medium)
    2. Chat Hard: the hard chat subsets (mt-bench-hard, llmbar-natural, llmbar-adver-neighbor, llmbar-adver-GPTInst, llmbar-adver-GPTOut, llmbar-adver-manual)
    3. Safety: the safety subsets (refusals-dangerous, refusals-offensive, xstest-should-refuse, xstest-should-respond, do not answer)
    4. Reasoning: the code and math subsets (math-prm, hep-cpp, hep-go, hep-java, hep-js, hep-python, hep-rust)
    5. Prior Sets (0.5 weight): the existing preference test sets (anthropic_helpful, anthropic_hhh, shp, summarize)
    """
    new_df = dataframe_core.copy()
    dataframe_prefs = dataframe_prefs.copy()

    # Per-section scores: weight each subset by its example count so every prompt
    # counts equally within a section.
    for subset, sub_subsets in subset_mapping.items():
        subset_cols = [col for col in new_df.columns if col in sub_subsets]
        sub_data = new_df[subset_cols].values
        sub_counts = [example_counts[s] for s in subset_cols]
        new_df[subset] = np.average(sub_data, axis=1, weights=sub_counts)

    data_cols = list(subset_mapping.keys())
    keep_columns = ["model", "model_type"] + data_cols
    new_df = new_df[keep_columns]

    # Prior test sets are reported as a single unweighted mean over the four subsets.
    pref_columns = ["anthropic_helpful", "anthropic_hhh", "shp", "summarize"]
    pref_data = dataframe_prefs[pref_columns].values
    dataframe_prefs["Prior Sets (0.5 weight)"] = np.nanmean(pref_data, axis=1)

    # Attach each model's Prior Sets score to the core dataframe (NaN if the model has
    # no prior-set results).
    new_df["Prior Sets (0.5 weight)"] = np.nan
    values = []
    for i, row in new_df.iterrows():
        model = row["model"]
        if model in dataframe_prefs["model"].values:
            values.append(dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0])
        else:
            values.append(np.nan)
    new_df["Prior Sets (0.5 weight)"] = values

    # Overall score: each core section gets weight 2 and Prior Sets gets weight 1
    # (i.e. half a core section). NaNs are masked so models without prior-set results
    # are averaged over the available sections only.
    data_cols += ["Prior Sets (0.5 weight)"]
    final_data = new_df[data_cols].values
    masked_data = np.ma.masked_array(final_data, np.isnan(final_data))
    weights = [2, 2, 2, 2, 1]
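    # Illustrative example (assumed numbers): with Chat=0.9, Chat Hard=0.6, Safety=0.8,
    # Reasoning=0.7 and Prior Sets=0.7, the overall score is
    # (2*0.9 + 2*0.6 + 2*0.8 + 2*0.7 + 1*0.7) / 9 ≈ 0.744; if Prior Sets is NaN, the
    # weighted average is taken over the four core sections alone.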
    average = np.ma.average(masked_data, axis=1, weights=weights)
    new_df["average"] = average.filled(np.nan)

    keep_columns = ["model", "model_type", "average"] + data_cols
    new_df = new_df[keep_columns]
    return new_df


def expand_subsets(dataframe):
    # not yet implemented
    pass


def length_bias_check(dataframe):
    """
    Takes the raw rewardbench dataframe and splits the subsets into three buckets
    (Length Bias, Neutral, Terse Bias) according to length_categories, then reports
    each model's mean score within each bucket.
    """
    new_df = dataframe.copy()
    existing_subsets = new_df.columns[3:]  # skip the leading metadata columns
    final_subsets = ["Length Bias", "Neutral", "Terse Bias"]

    new_data = {s: [] for s in final_subsets}

    # Bucket each subset's scores by its length category.
    for subset in existing_subsets:
        subset_data = new_df[subset].values
        subset_length = length_categories[subset]

        if subset_length == "True":
            new_data["Length Bias"].append(subset_data)
        elif subset_length == "Neutral":
            new_data["Neutral"].append(subset_data)
        elif subset_length == "False":
            new_data["Terse Bias"].append(subset_data)

    # Mean score per bucket for each model.
    for subset in final_subsets:
        new_df[subset] = np.nanmean(new_data[subset], axis=0)
    keep_columns = ["model"] + final_subsets
    new_df = new_df[keep_columns]

    return new_df


rewardbench_data = load_all_data(repo_dir_rewardbench, subdir="eval-set").sort_values(by='average', ascending=False)
rewardbench_data_length = length_bias_check(rewardbench_data).sort_values(by='Terse Bias', ascending=False)
prefs_data = load_all_data(repo_dir_rewardbench, subdir="pref-sets").sort_values(by='average', ascending=False)

rewardbench_data_avg = avg_over_rewardbench(rewardbench_data, prefs_data).sort_values(by='average', ascending=False)


def prep_df(df):
    df.insert(0, '', range(1, 1 + len(df)))  # add a leaderboard rank column

    df = df.rename(columns={"model": "Model", "model_type": "Model Type", "average": "Average"})
    return df


rewardbench_data = prep_df(rewardbench_data)
rewardbench_data_avg = prep_df(rewardbench_data_avg).rename(columns={"Average": "Score"})

rewardbench_data_length = prep_df(rewardbench_data_length)
prefs_data = prep_df(prefs_data)

# Gradio column datatypes for each table: rank, Model (rendered as markdown), Model Type, then numeric score columns.
col_types_rewardbench = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data.columns) - 1)
col_types_rewardbench_avg = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data_avg.columns) - 1)
cols_rewardbench_data_length = ["markdown"] + ["number"] * (len(rewardbench_data_length.columns) - 1)
col_types_prefs = ["number"] + ["markdown"] + ["number"] * (len(prefs_data.columns) - 1)


# Load the eval set for the dataset viewer tab.
eval_set = load_dataset(eval_set_repo, use_auth_token=COLLAB_TOKEN, split="filtered")


def random_sample(r: gr.Request, subset):
    if subset is None or subset == []:
        sample_index = np.random.randint(0, len(eval_set))
        sample = eval_set[sample_index]
    else:
        if isinstance(subset, str):
            subset = [subset]
        # filter the dataset to the selected subsets, then sample from what remains
        eval_set_filtered = eval_set.filter(lambda x: x["subset"] in subset)
        sample_index = np.random.randint(0, len(eval_set_filtered))
        sample = eval_set_filtered[sample_index]

    markdown_text = '\n\n'.join([f"**{key}**:\n\n{value}" for key, value in sample.items()])
    return markdown_text
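
# Example (hypothetical record): a sample with fields such as
# {"prompt": ..., "chosen": ..., "rejected": ..., "subset": "alpacaeval-easy"}
# is rendered by random_sample as one "**field**:\n\nvalue" markdown block per field.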

subsets = eval_set.unique("subset")


def regex_table(dataframe, regex, filter_button):
    """
    Takes a comma-separated list of model-name regexes and returns only the rows whose
    Model matches one of them, restricted to the model types selected in filter_button.
    """
    regex_list = [x.strip() for x in regex.split(",")]
    # join them so a row matches if it contains any of the patterns
    combined_regex = '|'.join(regex_list)
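    # Example (hypothetical query): regex="llama, zephyr" with
    # filter_button=["DPO", "Seq. Classifiers"] keeps rows whose Model contains "llama"
    # or "zephyr" (case-insensitive) and whose Model Type survives the filters below.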

    # Drop rows for model types that are not selected in the checkbox group.
    if isinstance(filter_button, list) or isinstance(filter_button, str):
        if "AI2 Experiments" not in filter_button and ("ai2" not in regex):
            dataframe = dataframe[~dataframe["Model"].str.contains("ai2", case=False, na=False)]
        if "Seq. Classifiers" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("Seq. Classifier", case=False, na=False)]
        if "DPO" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("DPO", case=False, na=False)]
        if "Custom Classifiers" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("Custom Classifier", case=False, na=False)]
        if "Generative" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("generative", case=False, na=False)]

    data = dataframe[dataframe["Model"].str.contains(combined_regex, case=False, na=False)]

    # Recompute the rank column for the filtered view.
    data[''] = np.arange(1, 1 + len(data))

    # Round score columns for display.
    if "Score" in data.columns:
        data["Score"] = np.round(np.array(data["Score"].values).astype(float), 2)
    if "Average" in data.columns:
        data["Average"] = np.round(np.array(data["Average"].values).astype(float), 1)

    for col in data.columns:
        if col not in ["", "Model", "Model Type", "Score", "Average"]:
            # convert any empty strings to NaN before rounding
            data[col] = data[col].replace('', np.nan)
            data[col] = np.round(np.array(data[col].values).astype(float), 1)
    return data


# Count of models with every model type selected, used in the header text.
total_models = len(regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"]).values)


with gr.Blocks(css=custom_css) as app:
    with gr.Row():
        with gr.Column(scale=6):
            gr.Markdown(TOP_TEXT.format(str(total_models)))
        with gr.Column(scale=4):
            gr.Markdown("""
![](file/src/logo.png)
            """)
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏆 RewardBench Leaderboard"):
            with gr.Row():
                search_1 = gr.Textbox(
                    label="Model Search (delimit with , )",
                    placeholder="Model Search (delimit with , )",
                    show_label=False,
                )
                model_types_1 = gr.CheckboxGroup(
                    ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "AI2 Experiments"],
                    value=["Seq. Classifiers", "DPO", "Custom Classifiers"],
                    label="Model Types",
                    show_label=False,
                )
            with gr.Row():
                # A hidden, unfiltered copy of the data serves as the source for the search/filter callbacks below.
                rewardbench_table_hidden = gr.Dataframe(
                    rewardbench_data_avg.values,
                    datatype=col_types_rewardbench_avg,
                    headers=rewardbench_data_avg.columns.tolist(),
                    visible=False,
                )
                rewardbench_table = gr.Dataframe(
                    regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers"]).values,
                    datatype=col_types_rewardbench_avg,
                    headers=rewardbench_data_avg.columns.tolist(),
                    elem_id="rewardbench_dataframe_avg",
                    height=1000,
                )

        with gr.TabItem("🔍 RewardBench - Detailed"):
            with gr.Row():
                search_2 = gr.Textbox(
                    label="Model Search (delimit with , )",
                    show_label=False,
                    placeholder="Model Search (delimit with , )",
                )
                model_types_2 = gr.CheckboxGroup(
                    ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "AI2 Experiments"],
                    value=["Seq. Classifiers", "DPO", "Custom Classifiers"],
                    label="Model Types",
                    show_label=False,
                )
            with gr.Row():
                rewardbench_table_detailed_hidden = gr.Dataframe(
                    rewardbench_data.values,
                    datatype=col_types_rewardbench,
                    headers=rewardbench_data.columns.tolist(),
                    visible=False,
                )
                rewardbench_table_detailed = gr.Dataframe(
                    regex_table(rewardbench_data.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers"]).values,
                    datatype=col_types_rewardbench,
                    headers=rewardbench_data.columns.tolist(),
                    elem_id="rewardbench_dataframe",
                    height=1000,
                )
        with gr.TabItem("Prior Test Sets"):
            with gr.Row():
                search_3 = gr.Textbox(
                    label="Model Search (delimit with , )",
                    show_label=False,
                    placeholder="Model Search (delimit with , )",
                )
                model_types_3 = gr.CheckboxGroup(
                    ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "AI2 Experiments"],
                    value=["Seq. Classifiers", "DPO", "Custom Classifiers"],
                    label="Model Types",
                    show_label=False,
                )
            with gr.Row():
                PREF_SET_TEXT = """
For more information, see the [dataset](https://huggingface.co/datasets/allenai/pref-test-sets). Only the Anthropic Helpful, Anthropic HHH, Stanford SHP, and OpenAI Summarize subsets are used in the leaderboard ranking.
"""
                gr.Markdown(PREF_SET_TEXT)
            with gr.Row():
                pref_sets_table_hidden = gr.Dataframe(
                    prefs_data.values,
                    datatype=col_types_prefs,
                    headers=prefs_data.columns.tolist(),
                    visible=False,
                )
                pref_sets_table = gr.Dataframe(
                    regex_table(prefs_data.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers"]).values,
                    datatype=col_types_prefs,
                    headers=prefs_data.columns.tolist(),
                    elem_id="prefs_dataframe",
                    height=1000,
                )

        with gr.TabItem("About"):
            with gr.Row():
                gr.Markdown(ABOUT_TEXT)

        with gr.TabItem("Dataset Viewer"):
            with gr.Row():
                gr.Markdown("""## Random Dataset Sample Viewer
Warning: the refusals, XSTest, and donotanswer subsets contain sensitive content.""")
                subset_selector = gr.Dropdown(subsets, label="Subset", value=None, multiselect=True)
                button = gr.Button("Show Random Sample")

            with gr.Row():
                sample_display = gr.Markdown("{sampled data loads here}")

            button.click(fn=random_sample, inputs=[subset_selector], outputs=[sample_display])

    # Re-filter each table from its hidden, unfiltered copy whenever the search box or
    # the model-type checkboxes change.
    search_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
    search_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
    search_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)

    model_types_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
    model_types_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
    model_types_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)

    with gr.Row():
        with gr.Accordion("📚 Citation", open=False):
            citation_button = gr.Textbox(
                value=r"""@misc{RewardBench,
    title={RewardBench: Evaluating Reward Models for Language Modeling},
    author={Lambert, Nathan and Pyatkin, Valentina and Morrison, Jacob and Miranda, LJ and Lin, Bill Yuchen and Chandu, Khyathi and Dziri, Nouha and Kumar, Sachin and Zick, Tom and Choi, Yejin and Smith, Noah A. and Hajishirzi, Hannaneh},
    year={2024},
    howpublished={\url{https://huggingface.co/spaces/allenai/reward-bench}}
}""",
                lines=7,
                label="Copy the following to cite these results.",
                elem_id="citation-button",
                show_copy_button=True,
            )


# Restart the Space every 3 hours (10800 s) so it re-pulls fresh evaluation results.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=10800)
scheduler.start()
app.launch(allowed_paths=['src/'])
|