Shane committed on
Commit
92c7f09
1 Parent(s): 7f5f365

made lots of changes

Files changed (7)
  1. app.py +2 -3
  2. app_old.py +0 -464
  3. src/constants.py +0 -60
  4. src/logo_old.png +0 -0
  5. src/md.py +19 -2
  6. src/md_old.py +0 -105
  7. src/utils_old.py +0 -171
app.py CHANGED
@@ -95,7 +95,7 @@ with gr.Blocks(css=custom_css) as app:
95
  # filter_button = gr.Checkbox(label="Include AI2 training runs (or type ai2 above).", interactive=True)
96
  # img = gr.Image(value="https://private-user-images.githubusercontent.com/10695622/310698241-24ed272a-0844-451f-b414-fde57478703e.png", width=500)
97
  gr.Markdown("""
98
- ![](file/src/logo.png)
99
  """)
100
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
101
  with gr.TabItem("🏆 HREF Leaderboard"):
@@ -149,8 +149,7 @@ with gr.Blocks(css=custom_css) as app:
149
  with gr.TabItem("Dataset Viewer"):
150
  with gr.Row():
151
  # loads one sample
152
- gr.Markdown("""## Random Dataset Sample Viewer
153
- Warning, refusals, XSTest, and donotanswer datasets have sensitive content.""")
154
  subset_selector = gr.Dropdown(subsets, label="Category", value=None, multiselect=True)
155
  button = gr.Button("Show Random Sample")
156
 
 
95
  # filter_button = gr.Checkbox(label="Include AI2 training runs (or type ai2 above).", interactive=True)
96
  # img = gr.Image(value="https://private-user-images.githubusercontent.com/10695622/310698241-24ed272a-0844-451f-b414-fde57478703e.png", width=500)
97
  gr.Markdown("""
98
+ <img src="file/src/logo.png" height="200">
99
  """)
100
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
101
  with gr.TabItem("🏆 HREF Leaderboard"):
 
149
  with gr.TabItem("Dataset Viewer"):
150
  with gr.Row():
151
  # loads one sample
152
+ gr.Markdown("""## Random Dataset Sample Viewer""")
 
153
  subset_selector = gr.Dropdown(subsets, label="Category", value=None, multiselect=True)
154
  button = gr.Button("Show Random Sample")
155
 
app_old.py DELETED
@@ -1,464 +0,0 @@
1
- import gradio as gr
2
- import os
3
- from huggingface_hub import HfApi, snapshot_download
4
- from apscheduler.schedulers.background import BackgroundScheduler
5
- from datasets import load_dataset
6
- from src.utils_old import load_all_data
7
- from src.md import ABOUT_TEXT, TOP_TEXT
8
- from src.constants import subset_mapping, length_categories, example_counts
9
- from src.css import custom_css
10
- import numpy as np
11
-
12
- api = HfApi()
13
-
14
- COLLAB_TOKEN = os.environ.get("COLLAB_TOKEN")
15
- evals_repo = "allenai/reward-bench-results"
16
-
17
- eval_set_repo = "allenai/reward-bench"
18
- repo_dir_rewardbench = "./evals/rewardbench/"
19
-
20
- def restart_space():
21
- api.restart_space(repo_id="allenai/reward-bench", token=COLLAB_TOKEN)
22
-
23
- print("Pulling evaluation results")
24
- repo = snapshot_download(
25
- local_dir=repo_dir_rewardbench,
26
- ignore_patterns=["pref-sets-scores/*", "eval-set-scores/*"],
27
- repo_id=evals_repo,
28
- use_auth_token=COLLAB_TOKEN,
29
- tqdm_class=None,
30
- etag_timeout=30,
31
- repo_type="dataset",
32
- )
33
-
34
-
35
- def avg_over_rewardbench(dataframe_core, dataframe_prefs):
36
- """
37
- Averages over the subsets alpacaeval, mt-bench, llmbar, refusals, hep and returns dataframe with only these columns.
38
-
39
- We average over 4 core sections (per prompt weighting):
40
- 1. Chat: Includes the easy chat subsets (alpacaeval-easy, alpacaeval-length, alpacaeval-hard, mt-bench-easy, mt-bench-medium)
41
- 2. Chat Hard: Includes the hard chat subsets (mt-bench-hard, llmbar-natural, llmbar-adver-neighbor, llmbar-adver-GPTInst, llmbar-adver-GPTOut, llmbar-adver-manual)
42
- 3. Safety: Includes the safety subsets (refusals-dangerous, refusals-offensive, xstest-should-refuse, xstest-should-respond, do not answer)
43
- 4. Reasoning: Includes the code and math subsets (math-prm, hep-cpp, hep-go, hep-java, hep-js, hep-python, hep-rust)
44
- 5. Prior Sets (0.5 weight): Includes the test sets (anthropic_helpful, mtbench_human, shp, summarize)
45
- """
46
- new_df = dataframe_core.copy()
47
- dataframe_prefs = dataframe_prefs.copy()
48
-
49
- # for main subsets, keys in subset_mapping, take the weighted avg by example_counts and store for the models
50
- for subset, sub_subsets in subset_mapping.items():
51
- subset_cols = [col for col in new_df.columns if col in sub_subsets]
52
- sub_data = new_df[subset_cols].values # take the relevant column values
53
- sub_counts = [example_counts[s] for s in subset_cols] # take the example counts
54
- new_df[subset] = np.average(sub_data, axis=1, weights=sub_counts) # take the weighted average
55
- # new_df[subset] = np.round(np.nanmean(new_df[subset_cols].values, axis=1), 2)
56
-
57
- data_cols = list(subset_mapping.keys())
58
- keep_columns = ["model",] + ["model_type"] + data_cols
59
- # keep_columns = ["model", "average"] + subsets
60
- new_df = new_df[keep_columns]
61
-
62
- # selected average from pref_sets
63
- pref_columns = ["anthropic_helpful", "anthropic_hhh", "shp", "summarize"]
64
- pref_data = dataframe_prefs[pref_columns].values
65
-
66
- # add column test sets knowing the rows are not identical, take superset
67
- dataframe_prefs["Prior Sets (0.5 weight)"] = np.nanmean(pref_data, axis=1)
68
-
69
- # add column Test Sets empty to new_df
70
- new_df["Prior Sets (0.5 weight)"] = np.nan
71
- # per row in new_df if model is in dataframe_prefs, add the value to new_df["Prior Sets (0.5 weight)"]
72
- values = []
73
- for i, row in new_df.iterrows():
74
- model = row["model"]
75
- if model in dataframe_prefs["model"].values:
76
- values.append(dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0])
77
- # new_df.at[i, "Prior Sets (0.5 weight)"] = dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0]
78
- else:
79
- values.append(np.nan)
80
-
81
- new_df["Prior Sets (0.5 weight)"] = values
82
-
83
- # add total average
84
- data_cols += ["Prior Sets (0.5 weight)"]
85
- final_data = new_df[data_cols].values
86
- masked_data = np.ma.masked_array(final_data, np.isnan(final_data))
87
- weights = [2, 2, 2, 2, 1]
88
- average = np.ma.average(masked_data, axis=1, weights=weights)
89
- new_df["average"] = average.filled(np.nan)
90
- # new_df["average"] = np.nanmean(new_df[data_cols].values, axis=1)
91
-
92
- # make average third column
93
- keep_columns = ["model", "model_type", "average"] + data_cols
94
- new_df = new_df[keep_columns]
95
- return new_df
96
-
97
- def expand_subsets(dataframe):
98
- # TODO need to modify data/ script to do this
99
- pass
100
-
101
-
102
- def length_bias_check(dataframe):
103
- """
104
- Takes the raw rewardbench dataframe and splits the data into new buckets according to length_categories.
105
- Then, take the average of the three buckets as "average"
106
- """
107
- new_df = dataframe.copy()
108
- existing_subsets = new_df.columns[3:] # model, model_type, average
109
- final_subsets = ["Length Bias", "Neutral", "Terse Bias"]
110
- # new data is empty list dict for each final subset
111
- new_data = {s: [] for s in final_subsets}
112
-
113
- # now, subsets correspond to those with True, Neutral, and False length bias
114
- # check if length_categories[subset] == "True" or "False" or "Neutral"
115
- for subset in existing_subsets:
116
- subset_data = new_df[subset].values
117
- subset_length = length_categories[subset]
118
- # route to the correct bucket
119
- if subset_length == "True":
120
- new_data["Length Bias"].append(subset_data)
121
- elif subset_length == "Neutral":
122
- new_data["Neutral"].append(subset_data)
123
- elif subset_length == "False":
124
- new_data["Terse Bias"].append(subset_data)
125
-
126
- # take average of new_data and add to new_df (removing other columns than model)
127
- for subset in final_subsets:
128
- new_df[subset] = np.nanmean(new_data[subset], axis=0)
129
- keep_columns = ["model"] + final_subsets
130
- new_df = new_df[keep_columns]
131
- # recompute average
132
- # new_df["average"] = np.round(np.nanmean(new_df[final_subsets].values, axis=1), 2)
133
-
134
- return new_df
135
-
136
-
137
-
138
- rewardbench_data = load_all_data(repo_dir_rewardbench, subdir="eval-set").sort_values(by='average', ascending=False)
139
- rewardbench_data_length = length_bias_check(rewardbench_data).sort_values(by='Terse Bias', ascending=False)
140
- prefs_data = load_all_data(repo_dir_rewardbench, subdir="pref-sets").sort_values(by='average', ascending=False)
141
- # prefs_data_sub = expand_subsets(prefs_data).sort_values(by='average', ascending=False)
142
-
143
- rewardbench_data_avg = avg_over_rewardbench(rewardbench_data, prefs_data).sort_values(by='average', ascending=False)
144
-
145
- def prep_df(df):
146
- # add column to 0th entry with count (column name itself empty)
147
- df.insert(0, '', range(1, 1 + len(df)))
148
-
149
- # replace "model" with "Model" and "model_type" with "Model Type" and "average" with "Average"
150
- df = df.rename(columns={"model": "Model", "model_type": "Model Type", "average": "Average"})
151
-
152
- # if "Model Type" in columns
153
- if "Model Type" in df.columns:
154
- # get model_types that have generative in them
155
- mask = df["Model Type"].str.contains("generative", case=False, na=False)
156
-
157
- # set these values to "Generative"
158
- df.loc[mask, "Model Type"] = "Generative"
159
-
160
- return df
161
-
162
- # add count column to all dataframes
163
- rewardbench_data = prep_df(rewardbench_data)
164
- rewardbench_data_avg = prep_df(rewardbench_data_avg).rename(columns={"Average": "Score"})
165
- # adjust weight of this average to 50% for Prior Sets (0.5 weight), 1 for others
166
-
167
- rewardbench_data_length = prep_df(rewardbench_data_length)
168
- prefs_data = prep_df(prefs_data)
169
-
170
- col_types_rewardbench = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data.columns) - 1)
171
- col_types_rewardbench_avg = ["number"] + ["markdown"]+ ["str"] + ["number"] * (len(rewardbench_data_avg.columns) - 1)
172
- cols_rewardbench_data_length = ["markdown"] + ["number"] * (len(rewardbench_data_length.columns) - 1)
173
- col_types_prefs = ["number"] + ["markdown"] + ["number"] * (len(prefs_data.columns) - 1)
174
- # col_types_prefs_sub = ["markdown"] + ["number"] * (len(prefs_data_sub.columns) - 1)
175
-
176
- # for showing random samples
177
- eval_set = load_dataset(eval_set_repo, use_auth_token=COLLAB_TOKEN, split="filtered")
178
- def random_sample(r: gr.Request, subset):
179
- if subset is None or subset == []:
180
- sample_index = np.random.randint(0, len(eval_set) - 1)
181
- sample = eval_set[sample_index]
182
- else: # filter by subsets (can be list)
183
- if isinstance(subset, str):
184
- subset = [subset]
185
- # filter down dataset to only include the subset(s)
186
- eval_set_filtered = eval_set.filter(lambda x: x["subset"] in subset)
187
- sample_index = np.random.randint(0, len(eval_set_filtered) - 1)
188
- sample = eval_set_filtered[sample_index]
189
-
190
- markdown_text = '\n\n'.join([f"**{key}**:\n\n{value}" for key, value in sample.items()])
191
- return markdown_text
192
-
193
- subsets = eval_set.unique("subset")
194
-
195
- color_map = {
196
- "Generative": "#7497db",
197
- "Custom Classifier": "#E8ECF2",
198
- "Seq. Classifier": "#ffcd75",
199
- "DPO": "#75809c",
200
- }
201
- def color_model_type_column(df, color_map):
202
- """
203
- Apply color to the 'Model Type' column of the DataFrame based on a given color mapping.
204
-
205
- Parameters:
206
- df (pd.DataFrame): The DataFrame containing the 'Model Type' column.
207
- color_map (dict): A dictionary mapping model types to colors.
208
-
209
- Returns:
210
- pd.Styler: The styled DataFrame.
211
- """
212
- # Function to apply color based on the model type
213
- def apply_color(val):
214
- color = color_map.get(val, "default") # Default color if not specified in color_map
215
- return f'background-color: {color}'
216
-
217
- # Format for different columns
218
- format_dict = {col: "{:.1f}" for col in df.columns if col not in ['Average', 'Model', 'Model Type']}
219
- format_dict['Average'] = "{:.2f}"
220
- format_dict[''] = "{:d}"
221
-
222
- return df.style.applymap(apply_color, subset=['Model Type']).format(format_dict, na_rep='')
223
-
224
- def regex_table(dataframe, regex, filter_button, style=True):
225
- """
226
- Takes a model name as a regex, then returns only the rows that has that in it.
227
- """
228
- # Split regex statement by comma and trim whitespace around regexes
229
- regex_list = [x.strip() for x in regex.split(",")]
230
- # Join the list into a single regex pattern with '|' acting as OR
231
- combined_regex = '|'.join(regex_list)
232
-
233
- # remove internal ai2 data
234
- dataframe = dataframe[~dataframe["Model"].str.contains("ai2", case=False, na=False)]
235
-
236
- # if filter_button, remove all rows with "ai2" in the model name
237
- update_scores = False
238
- if isinstance(filter_button, list) or isinstance(filter_button, str):
239
- if "Prior Sets" not in filter_button and 'Prior Sets (0.5 weight)' in dataframe.columns:
240
- update_scores = True
241
- # remove the column "Prior Sets (0.5 weight)" from the outputted table
242
- dataframe = dataframe.drop(columns=['Prior Sets (0.5 weight)'])
243
- if "Seq. Classifiers" not in filter_button:
244
- dataframe = dataframe[~dataframe["Model Type"].str.contains("Seq. Classifier", case=False, na=False)]
245
- if "DPO" not in filter_button:
246
- dataframe = dataframe[~dataframe["Model Type"].str.contains("DPO", case=False, na=False)]
247
- if "Custom Classifiers" not in filter_button:
248
- dataframe = dataframe[~dataframe["Model Type"].str.contains("Custom Classifier", case=False, na=False)]
249
- if "Generative" not in filter_button:
250
- dataframe = dataframe[~dataframe["Model Type"].str.contains("generative", case=False, na=False)]
251
- # Filter the dataframe such that 'model' contains any of the regex patterns
252
- data = dataframe[dataframe["Model"].str.contains(combined_regex, case=False, na=False)]
253
-
254
- # if update the score to not use prior sets, do so
255
- if update_scores:
256
- data["Score"] = (data["Chat"] + data["Chat Hard"] + data["Safety"] + data["Reasoning"]) / 4
257
- # if "Prior Sets (0.5 weight)" in data.columns:
258
- # data["Prior Sets (0.5 weight)"] = np.nan
259
- # sort array by Score column
260
- data = data.sort_values(by='Score', ascending=False)
261
-
262
- data.reset_index(drop=True, inplace=True)
263
-
264
- # replace column '' with count/rank
265
- data[''] = np.arange(1, 1 + len(data))
266
-
267
- # if Score exists, round to 2 decimals
268
- if "Score" in data.columns:
269
- data["Score"] = np.round(np.array(data["Score"].values).astype(float), 2)
270
- if "Average" in data.columns:
271
- data["Average"] = np.round(np.array(data["Average"].values).astype(float), 1)
272
- # round all others to 1 decimal
273
- for col in data.columns:
274
- if col not in ["", "Model", "Model Type", "Score", "Average"]:
275
- # replace any data[col].values == '' with np.nan
276
- data[col] = data[col].replace('', np.nan)
277
- data[col] = np.round(np.array(data[col].values).astype(float), 1)
278
- if style:
279
- # apply color
280
- data = color_model_type_column(data, color_map)
281
-
282
- return data
283
-
284
-
285
- def printout(df):
286
- print(df.iloc[0])
287
- print(df.iloc[1])
288
-
289
- # import ipdb; ipdb.set_trace()
290
-
291
- total_models = len(regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"], style=False).values)
292
-
293
-
294
- with gr.Blocks(css=custom_css) as app:
295
- # create tabs for the app, moving the current table to one titled "rewardbench" and the benchmark_text to a tab called "About"
296
- with gr.Row():
297
- with gr.Column(scale=6):
298
- gr.Markdown(TOP_TEXT.format(str(total_models)))
299
- with gr.Column(scale=4):
300
- # search = gr.Textbox(label="Model Search (delimit with , )", placeholder="Regex search for a model")
301
- # filter_button = gr.Checkbox(label="Include AI2 training runs (or type ai2 above).", interactive=True)
302
- # img = gr.Image(value="https://private-user-images.githubusercontent.com/10695622/310698241-24ed272a-0844-451f-b414-fde57478703e.png", width=500)
303
- gr.Markdown("""
304
- ![](file/src/logo.png)
305
- """)
306
- with gr.Tabs(elem_classes="tab-buttons") as tabs:
307
- with gr.TabItem("🏆 RewardBench Leaderboard"):
308
- with gr.Row():
309
- search_1 = gr.Textbox(label="Model Search (delimit with , )",
310
- placeholder="Model Search (delimit with , )",
311
- show_label=False)
312
- model_types_1 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative", "Prior Sets"],
313
- value=["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
314
- label="Model Types",
315
- show_label=False,
316
- # info="Which model types to include.",
317
- )
318
- with gr.Row():
319
- # reference data
320
- rewardbench_table_hidden = gr.Dataframe(
321
- rewardbench_data_avg.values,
322
- datatype=col_types_rewardbench_avg,
323
- headers=rewardbench_data_avg.columns.tolist(),
324
- visible=False,
325
- )
326
- rewardbench_table = gr.Dataframe(
327
- regex_table(rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"]),
328
- datatype=col_types_rewardbench_avg,
329
- headers=rewardbench_data_avg.columns.tolist(),
330
- elem_id="rewardbench_dataframe_avg",
331
- max_height=1000,
332
- )
333
-
334
- with gr.TabItem("🔍 RewardBench - Detailed"):
335
- with gr.Row():
336
- search_2 = gr.Textbox(label="Model Search (delimit with , )", show_label=False, placeholder="Model Search (delimit with , )")
337
- model_types_2 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
338
- value=["Seq. Classifiers", "DPO", "Generative", "Custom Classifiers"],
339
- label="Model Types",
340
- show_label=False,
341
- # info="Which model types to include."
342
- )
343
- with gr.Row():
344
- # ref data
345
- rewardbench_table_detailed_hidden = gr.Dataframe(
346
- rewardbench_data.values,
347
- datatype=col_types_rewardbench,
348
- headers=rewardbench_data.columns.tolist(),
349
- visible=False,
350
- )
351
- rewardbench_table_detailed = gr.Dataframe(
352
- regex_table(rewardbench_data.copy(), "", ["Seq. Classifiers", "DPO", "Generative", "Custom Classifiers"]),
353
- datatype=col_types_rewardbench,
354
- headers=rewardbench_data.columns.tolist(),
355
- elem_id="rewardbench_dataframe",
356
- max_height=1000,
357
- )
358
- # with gr.TabItem("rewardbench Eval Set - Length Bias"):
359
- # with gr.Row():
360
- # # backup
361
- # rewardbench_table_len_hidden = gr.Dataframe(
362
- # rewardbench_data_length.values,
363
- # datatype=cols_rewardbench_data_length,
364
- # headers=rewardbench_data_length.columns.tolist(),
365
- # visible=False,
366
- # )
367
- # rewardbench_table_len = gr.Dataframe(
368
- # regex_table(rewardbench_data_length.copy(), "", False).values,
369
- # datatype=cols_rewardbench_data_length,
370
- # headers=rewardbench_data_length.columns.tolist(),
371
- # elem_id="rewardbench_dataframe_length",
372
- # height=1000,
373
- # )
374
- with gr.TabItem("Prior Test Sets"):
375
- with gr.Row():
376
- search_3 = gr.Textbox(label="Model Search (delimit with , )", show_label=False, placeholder="Model Search (delimit with , )")
377
- model_types_3 = gr.CheckboxGroup(["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
378
- value=["Seq. Classifiers", "DPO", "Custom Classifiers"],
379
- label="Model Types",
380
- show_label=False,
381
- # info="Which model types to include.",
382
- )
383
- with gr.Row():
384
- PREF_SET_TEXT = """
385
- For more information, see the [dataset](https://huggingface.co/datasets/allenai/pref-test-sets). Only the subsets Anthropic Helpful, Anthropic HHH, Stanford SHP, and OpenAI's Summarize data are used in the leaderboard ranking.
386
- """
387
- gr.Markdown(PREF_SET_TEXT)
388
- with gr.Row():
389
- # backup
390
- pref_sets_table_hidden = gr.Dataframe(
391
- prefs_data.values,
392
- datatype=col_types_prefs,
393
- headers=prefs_data.columns.tolist(),
394
- visible=False,
395
- )
396
- pref_sets_table = gr.Dataframe(
397
- regex_table(prefs_data.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers"]),
398
- datatype=col_types_prefs,
399
- headers=prefs_data.columns.tolist(),
400
- elem_id="prefs_dataframe",
401
- max_height=1000,
402
- )
403
-
404
-
405
- with gr.TabItem("About"):
406
- with gr.Row():
407
- gr.Markdown(ABOUT_TEXT)
408
-
409
- with gr.TabItem("Dataset Viewer"):
410
- with gr.Row():
411
- # loads one sample
412
- gr.Markdown("""## Random Dataset Sample Viewer
413
- Warning, refusals, XSTest, and donotanswer datasets have sensitive content.""")
414
- subset_selector = gr.Dropdown(subsets, label="Subset", value=None, multiselect=True)
415
- button = gr.Button("Show Random Sample")
416
-
417
- with gr.Row():
418
- sample_display = gr.Markdown("{sampled data loads here}")
419
-
420
- button.click(fn=random_sample, inputs=[subset_selector], outputs=[sample_display])
421
- # removed plot because not pretty enough
422
- # with gr.TabItem("Model Correlation"):
423
- # with gr.Row():
424
- # plot = plot_avg_correlation(rewardbench_data_avg, prefs_data)
425
- # gr.Plot(plot)
426
-
427
- search_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
428
- search_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
429
- # search.change(regex_table, inputs=[rewardbench_table_len_hidden, search, filter_button], outputs=rewardbench_table_len)
430
- search_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)
431
-
432
- model_types_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
433
- model_types_2.change(regex_table, inputs=[rewardbench_table_detailed_hidden, search_2, model_types_2], outputs=rewardbench_table_detailed)
434
- model_types_3.change(regex_table, inputs=[pref_sets_table_hidden, search_3, model_types_3], outputs=pref_sets_table)
435
-
436
- with gr.Row():
437
- with gr.Accordion("📚 Citation", open=False):
438
- citation_button = gr.Textbox(
439
- value=r"""@misc{RewardBench,
440
- title={RewardBench: Evaluating Reward Models for Language Modeling},
441
- author={Lambert, Nathan and Pyatkin, Valentina and Morrison, Jacob and Miranda, LJ and Lin, Bill Yuchen and Chandu, Khyathi and Dziri, Nouha and Kumar, Sachin and Zick, Tom and Choi, Yejin and Smith, Noah A. and Hajishirzi, Hannaneh},
442
- year={2024},
443
- howpublished={\url{https://huggingface.co/spaces/allenai/reward-bench}
444
- }""",
445
- lines=7,
446
- label="Copy the following to cite these results.",
447
- elem_id="citation-button",
448
- show_copy_button=True,
449
- )
450
- # Load data when app starts, TODO make this used somewhere...
451
- # def load_data_on_start():
452
- # data_rewardbench = load_all_data(repo_dir_rewardbench)
453
- # rewardbench_table.update(data_rewardbench)
454
-
455
- # data_rewardbench_avg = avg_over_rewardbench(repo_dir_rewardbench)
456
- # rewardbench_table.update(data_rewardbench_avg)
457
-
458
- # data_prefs = load_all_data(repo_dir_prefs)
459
- # pref_sets_table.update(data_prefs)
460
-
461
- scheduler = BackgroundScheduler()
462
- scheduler.add_job(restart_space, "interval", seconds=10800) # restarted every 3h
463
- scheduler.start()
464
- app.launch(allowed_paths=['src/']) # had .queue() before launch before... not sure if that's necessary
 
src/constants.py DELETED
@@ -1,60 +0,0 @@
1
- # reference for length bias categories
2
- length_categories = {
3
- 'alpacaeval-easy': 'True',
4
- 'alpacaeval-hard': 'True',
5
- 'alpacaeval-length': 'Neutral',
6
- 'donotanswer': 'False',
7
- 'hep-cpp': 'Neutral',
8
- 'hep-go': 'Neutral',
9
- 'hep-java': 'Neutral',
10
- 'hep-js': 'Neutral',
11
- 'hep-python': 'Neutral',
12
- 'hep-rust': 'Neutral',
13
- 'llmbar-adver-GPTInst': 'False',
14
- 'llmbar-adver-GPTOut': 'Neutral',
15
- 'llmbar-adver-manual': 'False',
16
- 'llmbar-adver-neighbor': 'False',
17
- 'llmbar-natural': 'Neutral',
18
- 'math-prm': 'Neutral',
19
- 'mt-bench-easy': 'False',
20
- 'mt-bench-hard': 'False',
21
- 'mt-bench-med': 'Neutral',
22
- 'refusals-dangerous': 'False',
23
- 'refusals-offensive': 'False',
24
- 'xstest-should-refuse': 'False',
25
- 'xstest-should-respond': 'True'
26
- }
27
-
28
- example_counts = {
29
- "alpacaeval-easy": 100,
30
- "alpacaeval-length": 95,
31
- "alpacaeval-hard": 95,
32
- "mt-bench-easy": 28,
33
- "mt-bench-med": 40,
34
- "mt-bench-hard": 37,
35
- "math-prm": 984, # actual length 447, upweighting to be equal to code
36
- "refusals-dangerous": 100,
37
- "refusals-offensive": 100,
38
- "llmbar-natural": 100,
39
- "llmbar-adver-neighbor": 134,
40
- "llmbar-adver-GPTInst": 92,
41
- "llmbar-adver-GPTOut": 47,
42
- "llmbar-adver-manual": 46,
43
- "xstest-should-refuse": 154,
44
- "xstest-should-respond": 250, # Note, refuse and respond were accidentally swapped until 9 Sept 2024
45
- "donotanswer": 136,
46
- "hep-cpp": 164,
47
- "hep-go": 164,
48
- "hep-java": 164,
49
- "hep-js": 164,
50
- "hep-python": 164,
51
- "hep-rust": 164
52
- }
53
-
54
- # note, this order should match the dataframe.
55
- subset_mapping = {
56
- "Chat": ['alpacaeval-easy', 'alpacaeval-hard', 'alpacaeval-length', 'mt-bench-easy', 'mt-bench-med'],
57
- "Chat Hard": ['llmbar-adver-GPTInst', 'llmbar-adver-GPTOut', 'llmbar-adver-manual', 'llmbar-adver-neighbor', 'llmbar-natural', 'mt-bench-hard'],
58
- "Safety": ['donotanswer', 'refusals-dangerous', 'refusals-offensive', 'xstest-should-refuse', 'xstest-should-respond'],
59
- "Reasoning": ["hep-cpp", "hep-go", "hep-java", "hep-js", "hep-python", "hep-rust", "math-prm"]
60
- }
 
src/logo_old.png DELETED
Binary file (50 kB)
 
src/md.py CHANGED
@@ -2,7 +2,24 @@ from datetime import datetime
2
  import pytz
3
 
4
  ABOUT_TEXT = """
5
- TODO
6
  """
7
 
8
  # Get Pacific time zone (handles PST/PDT automatically)
@@ -10,5 +27,5 @@ pacific_tz = pytz.timezone('America/Los_Angeles')
10
  current_time = datetime.now(pacific_tz).strftime("%H:%M %Z, %d %b %Y")
11
 
12
  TOP_TEXT = f"""# HREF: Human Reference Guided Evaluation for Instruction Following
13
- [Code]() | [Eval. Dataset]() | [Prior Test Sets]() | [Results]() | [Paper]() | Total models: {{}} | * Unverified models | ⚠️ Dataset Contamination | Last restart (PST): {current_time}
14
  """
 
2
  import pytz
3
 
4
  ABOUT_TEXT = """
5
+ HREF is an evaluation benchmark that measures language models' capacity to follow human instructions. It consists of 4,258 instructions covering 11 distinct categories, including Brainstorm, Open QA, Closed QA, Extract, Generation, Rewrite, Summarize, Coding, Classify, Fact Checking or Attributed QA, Multi-Document Synthesis, and Reasoning Over Numerical Data.
6
+ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64dff1ddb5cc372803af964d/dSv3U11h936t_q-aiqbkV.png)
7
+
8
+ ## Why HREF
9
+ | Benchmark | Size | Evaluation Method | Baseline Model | Judge Model | Task Oriented | Contamination Resistant | Contains Human Reference|
10
+ |--------------------|-------|------------|----------------|----------------|----------|------------|-----------|
11
+ | MT-Bench | 80 | Score | --- | gpt4 | ✓ | ✗ | ✗ |
12
+ | AlpacaEval 2.0 | 805 | PWC | gpt4-turbo | gpt4-turbo | ✗ | ✗ | ✗ |
13
+ | Chatbot Arena | --- | PWC | --- | Human | ✗ | ✓ | ✗ |
14
+ | Arena-Hard | 500 | PWC | gpt4-0314 | gpt4-turbo | ✗ | ✗ | ✗ |
15
+ | WildBench | 1,024 | Score/PWC | gpt4-turbo | three models | ✗ | ✗ | ✗ |
16
+ | **HREF** | 4,258 | PWC | Llama-3.1-405B-Instruct | Llama-3.1-70B-Instruct | ✓ | ✓ | ✓ |
17
+
18
+ - **Human Reference**: HREF leverages human-written answers as references to provide more reliable evaluation than previous methods.
19
+ - **Large**: HREF has the largest evaluation set among similar benchmarks, which makes its evaluation more reliable.
20
+ - **Contamination-resistant**: HREF's evaluation set is hidden, and it uses publicly available models for both the baseline model and the judge model, which makes it resistant to contamination.
21
+ - **Task Oriented**: Instead of instructions collected organically from users, HREF contains instructions written specifically to target 8 distinct categories used in instruction tuning, which allows it to provide more insight into how to improve language models.
22
+
23
  """
24
 
25
  # Get Pacific time zone (handles PST/PDT automatically)
 
27
  current_time = datetime.now(pacific_tz).strftime("%H:%M %Z, %d %b %Y")
28
 
29
  TOP_TEXT = f"""# HREF: Human Reference Guided Evaluation for Instruction Following
30
+ [Code]() | [Validation Set]() | [Human Agreement Set]() | [Results]() | [Paper]() | Total models: {{}} | * Unverified models | ⚠️ Dataset Contamination | Last restart (PST): {current_time}
31
  """
src/md_old.py DELETED
@@ -1,105 +0,0 @@
1
- from datetime import datetime
2
- import pytz
3
-
4
- ABOUT_TEXT = """
5
- We compute the win percentage for a reward model on hand curated chosen-rejected pairs for each prompt.
6
- A win is when the score for the chosen response is higher than the score for the rejected response.
7
-
8
- Note: Models with (*) after the model name are independently submitted model scores which have not been verified by the RewardBench team.
9
-
10
- ## Overview
11
-
12
- We average over 4 core sections (per prompt weighting):
13
- 1. **Chat**: Includes the easy chat subsets (alpacaeval-easy, alpacaeval-length, alpacaeval-hard, mt-bench-easy, mt-bench-medium)
14
- 2. **Chat Hard**: Includes the hard chat subsets (mt-bench-hard, llmbar-natural, llmbar-adver-neighbor, llmbar-adver-GPTInst, llmbar-adver-GPTOut, llmbar-adver-manual)
15
- 3. **Safety**: Includes the safety subsets (refusals-dangerous, refusals-offensive, xstest-should-refuse, xstest-should-respond, do not answer)
16
- 4. **Reasoning**: Includes the code and math subsets (math-prm, hep-cpp, hep-go, hep-java, hep-js, hep-python, hep-rust)
17
-
18
- For Reasoning, we increase the weight of the PRM-Math subset so code and math abilities are weighed equally in the final number, rather than increasing the relevance of code.
19
- We add a final column, **Prior Sets** -- includes the test sets ([anthropic_helpful](https://huggingface.co/datasets/Anthropic/hh-rlhf), [anthropic_hhh](https://huggingface.co/datasets/HuggingFaceH4/hhh_alignment), [shp](https://huggingface.co/datasets/stanfordnlp/SHP), [summarize](https://huggingface.co/datasets/openai/summarize_from_feedback))
20
- Prior sets is weighted 0.5x in the final score to avoid gamification by training on the available training sets of Anthropic HH, SHP, and Summarize.
21
-
22
- Once all subsets weighted averages are achieved, the final RewardBench score is the average across the 5 subset scores.
23
-
24
-
25
- We include multiple types of reward models in this evaluation:
26
- 1. **Sequence Classifiers** (Seq. Classifier): A model, normally trained with HuggingFace AutoModelForSequenceClassification, that takes in a prompt and a response and outputs a score.
27
- 2. **Custom Classifiers**: Research models with different architectures and training objectives to either take in two inputs at once or generate scores differently (e.g. PairRM and Stanford SteamSHP).
28
- 3. **DPO**: Models trained with Direct Preference Optimization (DPO), with modifiers such as `-ref-free` or `-norm` changing how scores are computed. *Note*: This also includes other models trained with implicit rewards, such as those trained with [KTO](https://arxiv.org/abs/2402.01306).
29
- 4. **Random**: Random choice baseline.
30
- 4. **Generative**: Prompting fine-tuned models to choose between two answers, similar to MT Bench and AlpacaEval.
31
-
32
- All models are evaluated in fp16 except for Starling-7B, which is evaluated in fp32.
33
- *Note*: The reference models for DPO models (and other implicit rewards) can be found in two ways.
34
- * Click on a specific model in results and you'll see a key `ref_model`, e.g. [Qwen](https://huggingface.co/datasets/allenai/reward-bench-results/blob/main/eval-set/Qwen/Qwen1.5-72B-Chat.json).
35
- * All the reference models are listed in the [evaluation configs](https://github.com/allenai/reward-bench/blob/main/scripts/configs/eval_configs.yaml).
36
-
37
-
38
- ### Subset Details
39
-
40
- Total number of the prompts is: 2985, filtered from 5123.
41
-
42
- | Subset | Num. Samples (Pre-filtering, post-filtering) | Description |
43
- | :---------- | :-----: | :---------: |
44
- | alpacaeval-easy | 805, 100 | Great model vs poor model |
45
- | alpacaeval-length | 805, 95 | Good model vs low model, equal length |
46
- | alpacaeval-hard | 805, 95 | Great model vs baseline model |
47
- | mt-bench-easy | 28, 28 | MT Bench 10s vs 1s |
48
- | mt-bench-medium | 45, 40 | MT Bench 9s vs 2-5s |
49
- | mt-bench-hard | 45, 37 | MT Bench 7-8 vs 5-6 |
50
- | refusals-dangerous | 505, 100 | Dangerous response vs no response |
51
- | refusals-offensive | 704, 100 | Offensive response vs no response |
52
- | llmbar-natural | 100 | (See [paper](https://arxiv.org/abs/2310.07641)) Manually curated instruction pairs |
53
- | llmbar-adver-neighbor | 134 | (See [paper](https://arxiv.org/abs/2310.07641)) Instruction response vs. off-topic prompt response |
54
- | llmbar-adver-GPTInst | 92 | (See [paper](https://arxiv.org/abs/2310.07641)) Instruction response vs. GPT4 generated off-topic prompt response |
55
- | llmbar-adver-GPTOut | 47 | (See [paper](https://arxiv.org/abs/2310.07641)) Instruction response vs. unhelpful-prompted GPT4 responses |
56
- | llmbar-adver-manual | 46 | (See [paper](https://arxiv.org/abs/2310.07641)) Challenge set chosen vs. rejected |
57
- | xstest-should-refuse | 450, 154 | False response dataset (see [paper](https://arxiv.org/abs/2308.01263)) |
58
- | xstest-should-respond | 450, 250 | False refusal dataset (see [paper](https://arxiv.org/abs/2308.01263)) |
59
- | do not answer | 939, 136 | [Prompts which responsible LLMs do not answer](https://huggingface.co/datasets/LibrAI/do-not-answer) |
60
- | math-prm | 447 | Human references vs. model error from OpenAI's Let's Verify Step by Step |
61
- | hep-cpp | 164 | C++ code revisions (See [dataset](https://huggingface.co/datasets/bigcode/humanevalpack) or [paper](https://arxiv.org/abs/2308.07124)) |
62
- | hep-go | 164 | Go code |
63
- | hep-java | 164 | Java code |
64
- | hep-js | 164 | Javascript code |
65
- | hep-python | 164 | Python code |
66
- | hep-rust | 164 | Rust code |
67
-
68
- Lengths (mean, std. dev.) include the prompt
69
-
70
- | subset | length bias | chosen_chars | rejected_chars | chosen_tokens | rejected_tokens | chosen_unique_tokens | rejected_unique_tokens |
71
- |-----------------------|-------------|----------------|------------------|-----------------|-------------------|------------------------|--------------------------|
72
- | alpacaeval-easy | True | 2283 (1138) | 646 (482) | 591 (303) | 167 (139) | 253 (117) | 83 (46) |
73
- | alpacaeval-hard | True | 1590 (769) | 526 (430) | 412 (199) | 137 (117) | 173 (67) | 71 (48) |
74
- | alpacaeval-length | Neutral | 2001 (1137) | 2127 (1787) | 511 (283) | 597 (530) | 192 (85) | 189 (99) |
75
- | donotanswer | False | 755 (722) | 1389 (695) | 170 (161) | 320 (164) | 104 (82) | 157 (73) |
76
- | hep-cpp | Neutral | 709 (341) | 705 (342) | 261 (125) | 259 (125) | 100 (29) | 99 (29) |
77
- | hep-go | Neutral | 738 (361) | 734 (361) | 266 (118) | 265 (118) | 100 (29) | 99 (29) |
78
- | hep-java | Neutral | 821 (393) | 814 (390) | 263 (123) | 261 (122) | 102 (30) | 102 (30) |
79
- | hep-js | Neutral | 677 (341) | 673 (339) | 251 (129) | 250 (128) | 93 (29) | 93 (29) |
80
- | hep-python | Neutral | 618 (301) | 616 (300) | 212 (98) | 211 (98) | 86 (26) | 85 (26) |
81
- | hep-rust | Neutral | 666 (391) | 660 (391) | 221 (132) | 219 (132) | 95 (29) | 95 (29) |
82
- | llmbar-adver-GPTInst | False | 735 (578) | 1623 (1055) | 170 (135) | 377 (245) | 93 (59) | 179 (106) |
83
- | llmbar-adver-GPTOut | Neutral | 378 (339) | 359 (319) | 96 (81) | 101 (94) | 60 (45) | 55 (41) |
84
- | llmbar-adver-manual | False | 666 (584) | 1139 (866) | 160 (134) | 264 (194) | 92 (63) | 140 (90) |
85
- | llmbar-adver-neighbor | False | 287 (297) | 712 (749) | 70 (76) | 173 (175) | 43 (31) | 91 (70) |
86
- | llmbar-natural | Neutral | 553 (644) | 530 (597) | 139 (162) | 130 (140) | 75 (71) | 70 (62) |
87
- | mt-bench-easy | False | 1563 (720) | 2129 (1520) | 377 (159) | 551 (415) | 166 (55) | 116 (62) |
88
- | mt-bench-hard | False | 1225 (499) | 1471 (1016) | 284 (116) | 349 (234) | 131 (45) | 136 (58) |
89
- | mt-bench-med | Neutral | 1558 (729) | 1733 (1312) | 377 (170) | 410 (311) | 162 (58) | 145 (88) |
90
- | refusals-dangerous | False | 597 (81) | 1828 (547) | 131 (20) | 459 (136) | 90 (12) | 211 (50) |
91
- | refusals-offensive | False | 365 (116) | 1092 (1146) | 82 (25) | 299 (278) | 64 (15) | 134 (101) |
92
- | xstest-should-refuse | False | 584 (419) | 904 (493) | 129 (89) | 217 (115) | 81 (47) | 116 (53) |
93
- | xstest-should-respond | True | 771 (420) | 466 (427) | 189 (105) | 107 (94) | 104 (48) | 67 (48) |
94
-
95
- For more details, see the [dataset](https://huggingface.co/datasets/allenai/reward-bench).
96
- """
97
-
98
- # Get Pacific time zone (handles PST/PDT automatically)
99
- pacific_tz = pytz.timezone('America/Los_Angeles')
100
- current_time = datetime.now(pacific_tz).strftime("%H:%M %Z, %d %b %Y")
101
-
102
- TOP_TEXT = f"""# RewardBench: Evaluating Reward Models
103
- ### Evaluating the capabilities, safety, and pitfalls of reward models
104
- [Code](https://github.com/allenai/reward-bench) | [Eval. Dataset](https://huggingface.co/datasets/allenai/reward-bench) | [Prior Test Sets](https://huggingface.co/datasets/allenai/pref-test-sets) | [Results](https://huggingface.co/datasets/allenai/reward-bench-results) | [Paper](https://arxiv.org/abs/2403.13787) | Total models: {{}} | * Unverified models | ⚠️ Dataset Contamination | Last restart (PST): {current_time}
105
- """
 
src/utils_old.py DELETED
@@ -1,171 +0,0 @@
1
- import pandas as pd
2
- from pathlib import Path
3
- from datasets import load_dataset
4
- import numpy as np
5
- import os
6
- import re
7
-
8
- UNVERIFIED_MODELS = [
9
- "nvidia/Nemotron-4-340B-Reward",
10
- "nvidia/Llama3-70B-SteerLM-RM",
11
- "Cohere May 2024",
12
- "google/gemini-1.5-pro-0514",
13
- "google/flame-24b-july-2024",
14
- "Cohere March 2024",
15
- "facebook/Self-taught-Llama-3-70B",
16
- "facebook/Self-taught-evaluator-llama3.1-70B",
17
- "google/flame-1.0-24B-july-2024",
18
- "Salesforce/SFR-LLaMa-3.1-70B-Judge-r",
19
- "Salesforce/SFR-nemo-12B-Judge-r",
20
- "Salesforce/SFR-LLaMa-3.1-8B-Judge-r",
21
- "SF-Foundation/TextEval-OffsetBias-12B",
22
- "SF-Foundation/TextEval-Llama3.1-70B",
23
- "nvidia/Llama-3.1-Nemotron-70B-Reward",
24
- ]
25
-
26
- CONTAMINATED_MODELS = [
27
- "Skywork/Skywork-Reward-Gemma-2-27B",
28
- "Skywork/Skywork-Critic-Llama-3.1-70B",
29
- "LxzGordon/URM-LLaMa-3.1-8B",
30
- "Skywork/Skywork-Reward-Llama-3.1-8B",
31
- "Ray2333/GRM-Llama3-8B-rewardmodel-ft",
32
- "nicolinho/QRM-Llama3.1-8B",
33
- "nicolinho/QRM-Llama3-8B",
34
- "general-preference/GPM-Llama-3.1-8B",
35
- "general-preference/GPM-Gemma-2B"
36
- ]
37
-
38
- # From Open LLM Leaderboard
39
- def model_hyperlink(link, model_name):
40
- # if model_name is above 50 characters, return first 47 characters and "..."
41
- if len(model_name) > 50:
42
- model_name = model_name[:47] + "..."
43
- if model_name == "random":
44
- output = "random"
45
- elif model_name == "Cohere March 2024":
46
- output = f'<a target="_blank" href="https://huggingface.co/Cohere" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
47
- elif "openai" == model_name.split("/")[0]:
48
- output = f'<a target="_blank" href="https://huggingface.co/openai" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
49
- elif "Anthropic" == model_name.split("/")[0]:
50
- output = f'<a target="_blank" href="https://huggingface.co/Anthropic" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
51
- elif "google" == model_name.split("/")[0]:
52
- output = f'<a target="_blank" href="https://huggingface.co/google" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
53
- elif "PoLL" == model_name.split("/")[0]:
54
- output = model_name
55
- output = f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
56
-
57
- if model_name in UNVERIFIED_MODELS:
58
- output += " *"
59
- if model_name in CONTAMINATED_MODELS:
60
- output += " ⚠️"
61
- return output
62
-
63
- def undo_hyperlink(html_string):
64
- # Regex pattern to match content inside > and <
65
- pattern = r'>[^<]+<'
66
- match = re.search(pattern, html_string)
67
- if match:
68
- # Extract the matched text and remove leading '>' and trailing '<'
69
- return match.group(0)[1:-1]
70
- else:
71
- return "No text found"
72
-
73
-
74
- # Define a function to fetch and process data
75
- def load_all_data(data_repo, subdir:str, subsubsets=False): # use HF api to pull the git repo
76
- dir = Path(data_repo)
77
- data_dir = dir / subdir
78
- orgs = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
79
- # get all files within the sub folders orgs
80
- models_results = []
81
- for org in orgs:
82
- org_dir = data_dir / org
83
- files = [f for f in os.listdir(org_dir) if os.path.isfile(os.path.join(org_dir, f))]
84
- for file in files:
85
- if file.endswith(".json"):
86
- models_results.append(org + "/" + file)
87
-
88
- # create empty dataframe to add all data to
89
- df = pd.DataFrame()
90
-
91
- # load all json data in the list models_results one by one to avoid not having the same entries
92
- for model in models_results:
93
- model_data = load_dataset("json", data_files=data_repo + subdir+ "/" + model, split="train")
94
- df2 = pd.DataFrame(model_data)
95
- # add to df
96
- df = pd.concat([df2, df])
97
-
98
-
99
- # remove chat_template column
100
- df = df.drop(columns=["chat_template"])
101
-
102
- # sort columns alphabetically
103
- df = df.reindex(sorted(df.columns), axis=1)
104
-
105
- # move column "model" to the front
106
- cols = list(df.columns)
107
- cols.insert(0, cols.pop(cols.index('model')))
108
- df = df.loc[:, cols]
109
-
110
- # select all columns except "model"
111
- cols = df.columns.tolist()
112
- cols.remove("model")
113
- # if model_type is a column (pref tests may not have it)
114
- if "model_type" in cols:
115
- cols.remove("model_type")
116
- # remove ref_model if in columns
117
- if "ref_model" in cols:
118
- cols.remove("ref_model")
119
- # remove model_beaker from dataframe
120
- if "model_beaker" in cols:
121
- cols.remove("model_beaker")
122
- df = df.drop(columns=["model_beaker"])
123
-
124
- # remove column xstest (outdated data)
125
- # if xstest is a column
126
- if "xstest" in cols:
127
- df = df.drop(columns=["xstest"])
128
- cols.remove("xstest")
129
-
130
- if "ref_model" in df.columns:
131
- df = df.drop(columns=["ref_model"])
132
-
133
- # remove column anthropic and summarize_prompted (outdated data)
134
- if "anthropic" in cols:
135
- df = df.drop(columns=["anthropic"])
136
- cols.remove("anthropic")
137
- if "summarize_prompted" in cols:
138
- df = df.drop(columns=["summarize_prompted"])
139
- cols.remove("summarize_prompted")
140
- # remove pku_better and pku_safer (removed from the leaderboard)
141
- if "pku_better" in cols:
142
- df = df.drop(columns=["pku_better"])
143
- cols.remove("pku_better")
144
- if "pku_safer" in cols:
145
- df = df.drop(columns=["pku_safer"])
146
- cols.remove("pku_safer")
147
-
148
- # convert to score
149
- df[cols] = (df[cols]*100)
150
- avg = np.nanmean(df[cols].values,axis=1)
151
- # add average column
152
- df["average"] = avg
153
-
154
- # apply model_hyperlink function to column "model"
155
- df["model"] = df["model"].apply(lambda x: model_hyperlink(f"https://huggingface.co/{x}", x))
156
-
157
- # move average column to the second
158
- cols = list(df.columns)
159
- cols.insert(1, cols.pop(cols.index('average')))
160
- df = df.loc[:, cols]
161
-
162
- # move model_type column to first
163
- if "model_type" in cols:
164
- cols = list(df.columns)
165
- cols.insert(1, cols.pop(cols.index('model_type')))
166
- df = df.loc[:, cols]
167
-
168
- # remove models with DPO Ref. Free as type (future work)
169
- df = df[~df["model_type"].str.contains("DPO Ref. Free", na=False)]
170
-
171
- return df