import gradio as gr
import os

from huggingface_hub import HfApi, snapshot_download
from apscheduler.schedulers.background import BackgroundScheduler
from datasets import load_dataset

from src.utils import load_all_data
from src.md import ABOUT_TEXT, TOP_TEXT
from src.plt import plot_avg_correlation

import numpy as np

api = HfApi()
COLLAB_TOKEN = os.environ.get("COLLAB_TOKEN")

# Source repositories: evaluation results and the evaluation set itself.
evals_repo = "ai2-adapt-dev/HERM-Results"
eval_set_repo = "ai2-adapt-dev/rm-benchmark-dev"

# Local directory the results snapshot is downloaded into.
repo_dir_herm = "./evals/herm/"

def restart_space():
    """Restart the Space so it reloads with freshly pushed results."""
    api.restart_space(repo_id="ai2-adapt-dev/rm-benchmark-viewer", token=COLLAB_TOKEN)

print("Pulling evaluation results") |
|
repo = snapshot_download( |
|
local_dir=repo_dir_herm, |
|
repo_id=evals_repo, |
|
use_auth_token=COLLAB_TOKEN, |
|
tqdm_class=None, |
|
etag_timeout=30, |
|
repo_type="dataset", |
|
) |
|
|
|
|
|
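# The snapshot is assumed to contain "eval-set/" and "pref-sets/"
# subdirectories, matching the subdir arguments passed to load_all_data below.
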
def avg_over_herm(dataframe):
    """
    Averages over the subsets alpacaeval, mt-bench, llmbar, refusals, and hep,
    and returns a dataframe with only these per-subset averages (plus the
    model name and overall average).
    """
    new_df = dataframe.copy()
    subsets = ["alpacaeval", "mt-bench", "llmbar", "refusals", "hep"]

    for subset in subsets:
        if subset == "refusals":
            # The refusals bucket also pools the do-not-answer and XSTest columns.
            subset_cols = [
                "refusals-dangerous",
                "refusals-offensive",
                "donotanswer",
                "xstest-should-refuse",
                "xstest-should-respond",
            ]
        else:
            subset_cols = [col for col in new_df.columns if subset in col]
        new_df[subset] = np.round(np.nanmean(new_df[subset_cols].values, axis=1), 2)

    keep_columns = ["model", "average"] + subsets
    new_df = new_df[keep_columns]

    # Recompute the overall average from the subset averages.
    new_df["average"] = np.round(np.nanmean(new_df[subsets].values, axis=1), 2)

    new_df = new_df.rename(columns={"hep": "hep (code)"})
    return new_df
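# Example of the collapsing above: every column whose name contains
# "alpacaeval" (e.g. "alpacaeval-easy", "alpacaeval-hard") is averaged into a
# single "alpacaeval" score, so the overview table shows one number per family.
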
def expand_subsets(dataframe):
    # TODO: placeholder, not implemented yet.
    pass
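# A possible direction for expand_subsets (an assumption, not the author's
# implementation): melt the wide per-subset columns into long form, e.g.
#   dataframe.melt(id_vars=["model"], var_name="subset", value_name="score")
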
length_categories = {
    'alpacaeval-easy': 'True',
    'alpacaeval-hard': 'True',
    'alpacaeval-length': 'Neutral',
    'donotanswer': 'False',
    'hep-cpp': 'Neutral',
    'hep-go': 'Neutral',
    'hep-java': 'Neutral',
    'hep-js': 'Neutral',
    'hep-python': 'Neutral',
    'hep-rust': 'Neutral',
    'llmbar-adver-GPTInst': 'False',
    'llmbar-adver-GPTOut': 'Neutral',
    'llmbar-adver-manual': 'False',
    'llmbar-adver-neighbor': 'False',
    'llmbar-natural': 'Neutral',
    'mt-bench-easy': 'False',
    'mt-bench-hard': 'False',
    'mt-bench-med': 'Neutral',
    'refusals-dangerous': 'False',
    'refusals-offensive': 'False',
    'xstest-should-refuse': 'False',
    'xstest-should-respond': 'True',
}
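# Interpretation of the labels above (inferred from the bucket names in
# length_bias_check below): 'True' = preferred completions tend to be longer,
# 'False' = they tend to be shorter, 'Neutral' = length is comparable.
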
def length_bias_check(dataframe):
    """
    Takes the raw HERM dataframe and regroups the subset scores into three
    buckets according to length_categories, reporting the mean score within
    each bucket.
    """
    new_df = dataframe.copy()
    existing_subsets = new_df.columns[2:]  # skip "model" and "average"
    final_subsets = ["Length Bias", "Neutral", "Terse Bias"]

    new_data = {s: [] for s in final_subsets}

    for subset in existing_subsets:
        subset_data = new_df[subset].values
        subset_length = length_categories[subset]

        if subset_length == "True":
            new_data["Length Bias"].append(subset_data)
        elif subset_length == "Neutral":
            new_data["Neutral"].append(subset_data)
        elif subset_length == "False":
            new_data["Terse Bias"].append(subset_data)

    for subset in final_subsets:
        new_df[subset] = np.round(np.nanmean(new_data[subset], axis=0), 2)

    keep_columns = ["model"] + final_subsets
    new_df = new_df[keep_columns]
    return new_df
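# Reading the result: a model that scores noticeably higher under
# "Length Bias" than "Terse Bias" is likely being rewarded for longer
# completions (and vice versa).
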
# Build the sorted views of the results shown in each tab.
herm_data = load_all_data(repo_dir_herm, subdir="eval-set").sort_values(by='average', ascending=False)
herm_data_avg = avg_over_herm(herm_data).sort_values(by='average', ascending=False)
herm_data_length = length_bias_check(herm_data).sort_values(by='Terse Bias', ascending=False)
prefs_data = load_all_data(repo_dir_herm, subdir="pref-sets").sort_values(by='average', ascending=False)

# Column types for gr.Dataframe: the first column ("model") renders as
# markdown (presumably so model names can be shown as links), the rest as numbers.
col_types_herm = ["markdown"] + ["number"] * (len(herm_data.columns) - 1)
col_types_herm_avg = ["markdown"] + ["number"] * (len(herm_data_avg.columns) - 1)
cols_herm_data_length = ["markdown"] + ["number"] * (len(herm_data_length.columns) - 1)
col_types_prefs = ["markdown"] + ["number"] * (len(prefs_data.columns) - 1)

# Load the evaluation set for the random sample viewer.
eval_set = load_dataset(eval_set_repo, use_auth_token=COLLAB_TOKEN, split="filtered")


def random_sample(r: gr.Request, subset):
    if subset is None or subset == []:
        # No subset selected: sample from the full eval set.
        # Note: np.random.randint excludes the high endpoint, so the original
        # len(eval_set) - 1 could never return the last row.
        sample_index = np.random.randint(0, len(eval_set))
        sample = eval_set[sample_index]
    else:
        if isinstance(subset, str):
            subset = [subset]
        # Filter to the selected subsets, then sample.
        eval_set_filtered = eval_set.filter(lambda x: x["subset"] in subset)
        sample_index = np.random.randint(0, len(eval_set_filtered))
        sample = eval_set_filtered[sample_index]

    markdown_text = '\n\n'.join([f"**{key}**:\n\n{value}" for key, value in sample.items()])
    return markdown_text
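# Note: Gradio injects the gr.Request argument automatically because of the
# type annotation; only the subset dropdown is wired as an input below.
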
subsets = eval_set.unique("subset")

with gr.Blocks() as app:
    with gr.Row():
        gr.Markdown(TOP_TEXT)
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("HERM Eval Set - Overview"):
            with gr.Row():
                herm_table_avg = gr.Dataframe(
                    herm_data_avg.values,
                    datatype=col_types_herm_avg,
                    headers=herm_data_avg.columns.tolist(),
                    elem_id="herm_dataframe_avg",
                    height=1000,
                )
        with gr.TabItem("HERM Eval Set - Detailed"):
            with gr.Row():
                herm_table = gr.Dataframe(
                    herm_data.values,
                    datatype=col_types_herm,
                    headers=herm_data.columns.tolist(),
                    elem_id="herm_dataframe",
                    height=1000,
                )
        with gr.TabItem("HERM Eval Set - Length Bias"):
            with gr.Row():
                herm_table_length = gr.Dataframe(
                    herm_data_length.values,
                    datatype=cols_herm_data_length,
                    headers=herm_data_length.columns.tolist(),
                    elem_id="herm_dataframe_length",
                    height=1000,
                )
        with gr.TabItem("Known Pref. Sets"):
            with gr.Row():
                PREF_SET_TEXT = """
                For more information, see the [dataset](https://huggingface.co/datasets/allenai/pref-test-sets).
                """
                gr.Markdown(PREF_SET_TEXT)
            with gr.Row():
                pref_sets_table = gr.Dataframe(
                    prefs_data.values,
                    datatype=col_types_prefs,
                    headers=prefs_data.columns.tolist(),
                    elem_id="prefs_dataframe",
                    height=1000,
                )

        with gr.TabItem("About"):
            with gr.Row():
                gr.Markdown(ABOUT_TEXT)

        with gr.TabItem("Dataset Viewer"):
            with gr.Row():
                gr.Markdown("## Random Dataset Sample Viewer")
                subset_selector = gr.Dropdown(subsets, label="Subset", value=None, multiselect=True)
                button = gr.Button("Show Random Sample")

            with gr.Row():
                sample_display = gr.Markdown("{sampled data loads here}")

            button.click(fn=random_sample, inputs=[subset_selector], outputs=[sample_display])

# Restart the space every 3 hours (10800 s) so it pulls fresh results.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=10800)
scheduler.start()

app.queue().launch()