IrinaArmstrong committed on
Commit
638c800
1 Parent(s): fa64e2d

Updated leaderboard

.gitignore CHANGED
@@ -6,8 +6,4 @@ __pycache__/
  *ipynb
  .vscode/

- eval-queue/
- eval-results/
- eval-queue-bk/
- eval-results-bk/
  logs/
app.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
  import pandas as pd
  from apscheduler.schedulers.background import BackgroundScheduler
  from huggingface_hub import snapshot_download
+ from pathlib import Path

  from src.about import (
  CITATION_BUTTON_LABEL,
@@ -26,7 +27,7 @@ from src.display.utils import (
  WeightType,
  Precision
  )
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
+ from src.envs import API, EVAL_RESULTS_PATH, REPO_ID, RESULTS_REPO, TOKEN
  from src.populate import get_evaluation_queue_df, get_leaderboard_df
  from src.submission.submit import add_new_eval

@@ -34,30 +35,34 @@ from src.submission.submit import add_new_eval
  def restart_space():
  API.restart_space(repo_id=REPO_ID)

- try:
- print(EVAL_REQUESTS_PATH)
- snapshot_download(
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
- )
- except Exception:
- restart_space()
- try:
- print(EVAL_RESULTS_PATH)
- snapshot_download(
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
- )
- except Exception:
- restart_space()
-
-
- raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+ # try:
+ # print(EVAL_REQUESTS_PATH)
+ # snapshot_download(
+ # repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+ # )
+ # except Exception:
+ # restart_space()
+ # try:
+ # print(EVAL_RESULTS_PATH)
+ # snapshot_download(
+ # repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
+ # )
+ # except Exception:
+ # restart_space()
+
+ results_files = [fn for fn in Path(EVAL_RESULTS_PATH).glob("*.json")]
+ if not len(results_files):
+ print(f"No results found in results path: {EVAL_RESULTS_PATH}")
+
+
+ raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
  leaderboard_df = original_df.copy()

- (
- finished_eval_queue_df,
- running_eval_queue_df,
- pending_eval_queue_df,
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+ # (
+ # finished_eval_queue_df,
+ # running_eval_queue_df,
+ # pending_eval_queue_df,
+ # ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)


  # Searching and filtering
@@ -242,92 +247,92 @@ with demo:
  with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

- with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
- with gr.Column():
- with gr.Row():
- gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
- with gr.Column():
- with gr.Accordion(
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
- open=False,
- ):
- with gr.Row():
- finished_eval_table = gr.components.Dataframe(
- value=finished_eval_queue_df,
- headers=EVAL_COLS,
- datatype=EVAL_TYPES,
- row_count=5,
- )
- with gr.Accordion(
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
- open=False,
- ):
- with gr.Row():
- running_eval_table = gr.components.Dataframe(
- value=running_eval_queue_df,
- headers=EVAL_COLS,
- datatype=EVAL_TYPES,
- row_count=5,
- )
-
- with gr.Accordion(
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
- open=False,
- ):
- with gr.Row():
- pending_eval_table = gr.components.Dataframe(
- value=pending_eval_queue_df,
- headers=EVAL_COLS,
- datatype=EVAL_TYPES,
- row_count=5,
- )
- with gr.Row():
- gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
- with gr.Row():
- with gr.Column():
- model_name_textbox = gr.Textbox(label="Model name")
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
- model_type = gr.Dropdown(
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
- label="Model type",
- multiselect=False,
- value=None,
- interactive=True,
- )
-
- with gr.Column():
- precision = gr.Dropdown(
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
- label="Precision",
- multiselect=False,
- value="float16",
- interactive=True,
- )
- weight_type = gr.Dropdown(
- choices=[i.value.name for i in WeightType],
- label="Weights type",
- multiselect=False,
- value="Original",
- interactive=True,
- )
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
- submit_button = gr.Button("Submit Eval")
- submission_result = gr.Markdown()
- submit_button.click(
- add_new_eval,
- [
- model_name_textbox,
- base_model_name_textbox,
- revision_name_textbox,
- precision,
- weight_type,
- model_type,
- ],
- submission_result,
- )
+ # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+ # with gr.Column():
+ # with gr.Row():
+ # gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+ #
+ # with gr.Column():
+ # with gr.Accordion(
+ # f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+ # open=False,
+ # ):
+ # with gr.Row():
+ # finished_eval_table = gr.components.Dataframe(
+ # value=finished_eval_queue_df,
+ # headers=EVAL_COLS,
+ # datatype=EVAL_TYPES,
+ # row_count=5,
+ # )
+ # with gr.Accordion(
+ # f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+ # open=False,
+ # ):
+ # with gr.Row():
+ # running_eval_table = gr.components.Dataframe(
+ # value=running_eval_queue_df,
+ # headers=EVAL_COLS,
+ # datatype=EVAL_TYPES,
+ # row_count=5,
+ # )
+ #
+ # with gr.Accordion(
+ # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+ # open=False,
+ # ):
+ # with gr.Row():
+ # pending_eval_table = gr.components.Dataframe(
+ # value=pending_eval_queue_df,
+ # headers=EVAL_COLS,
+ # datatype=EVAL_TYPES,
+ # row_count=5,
+ # )
+ # with gr.Row():
+ # gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+ #
+ # with gr.Row():
+ # with gr.Column():
+ # model_name_textbox = gr.Textbox(label="Model name")
+ # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+ # model_type = gr.Dropdown(
+ # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+ # label="Model type",
+ # multiselect=False,
+ # value=None,
+ # interactive=True,
+ # )
+ #
+ # with gr.Column():
+ # precision = gr.Dropdown(
+ # choices=[i.value.name for i in Precision if i != Precision.Unknown],
+ # label="Precision",
+ # multiselect=False,
+ # value="float16",
+ # interactive=True,
+ # )
+ # weight_type = gr.Dropdown(
+ # choices=[i.value.name for i in WeightType],
+ # label="Weights type",
+ # multiselect=False,
+ # value="Original",
+ # interactive=True,
+ # )
+ # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+ #
+ # submit_button = gr.Button("Submit Eval")
+ # submission_result = gr.Markdown()
+ # submit_button.click(
+ # add_new_eval,
+ # [
+ # model_name_textbox,
+ # base_model_name_textbox,
+ # revision_name_textbox,
+ # precision,
+ # weight_type,
+ # model_type,
+ # ],
+ # submission_result,
+ # )

  with gr.Row():
  with gr.Accordion("📙 Citation", open=False):
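With the submission tab and the queue download disabled above, the leaderboard now appears to be fed by result files committed directly into eval-results/ (the files added below). A hedged sketch of producing one such file locally; the model name and scores are placeholders, not part of this commit:

import json
import os

# Hypothetical new entry in the same shape as the committed result files.
entry = {
    "config": {"model_dtype": "float16", "model_name": "my-org/my-model", "model_sha": "main"},
    "results": {task: {"delta": 0.0} for task in ["hs", "d", "hy", "pd", "pa", "pf", "sc", "ma", "si", "l"]},
}
os.makedirs("eval-results", exist_ok=True)
with open("eval-results/results_my-org-my-model.json", "w") as f:
    json.dump(entry, f)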
eval-results/results_HuggingFaceH4-zephyr-7b-beta.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "HuggingFaceH4/zephyr-7b-beta", "model_sha": "main"}, "results": {"hs": {"delta": -0.039}, "d": {"delta": 0.312}, "hy": {"delta": -0.018}, "pd": {"delta": -0.025}, "pa": {"delta": -0.044}, "pf": {"delta": 0.133}, "sc": {"delta": 0.144}, "ma": {"delta": 0.16}, "si": {"delta": 0.278}, "l": {"delta": -0.016555555}}}
eval-results/results_NousResearch-Meta-Llama-3-8B-Instruct.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "NousResearch/Meta-Llama-3-8B-Instruct", "model_sha": "main"}, "results": {"hs": {"delta": 0.041}, "d": {"delta": 0.322}, "hy": {"delta": 0.01}, "pd": {"delta": 0.012}, "pa": {"delta": -0.061}, "pf": {"delta": 0.211}, "sc": {"delta": 0.231}, "ma": {"delta": 0.091}, "si": {"delta": 0.382}, "l": {"delta": -0.018}}}
eval-results/results_NousResearch-Meta-Llama-3-8B.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "NousResearch/Meta-Llama-3-8B", "model_sha": "main"}, "results": {"hs": {"delta": -0.006}, "d": {"delta": 0.176}, "hy": {"delta": -0.012}, "pd": {"delta": -0.013}, "pa": {"delta": 0.0}, "pf": {"delta": 0.059}, "sc": {"delta": 0.101}, "ma": {"delta": 0.033}, "si": {"delta": 0.231}, "l": {"delta": -0.007333333}}}
eval-results/results_berkeley-nest-Starling-LM-7B-alpha.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "berkeley-nest/Starling-LM-7B-alpha", "model_sha": "main"}, "results": {"hs": {"delta": -0.073}, "d": {"delta": 0.681}, "hy": {"delta": 0.0}, "pd": {"delta": -0.029}, "pa": {"delta": -0.111}, "pf": {"delta": 0.262}, "sc": {"delta": 0.26}, "ma": {"delta": 0.011}, "si": {"delta": 0.5}, "l": {"delta": 0.008333334}}}
eval-results/results_google-gemma-7b-it.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "google/gemma-7b-it", "model_sha": "main"}, "results": {"hs": {"delta": 0.1}, "d": {"delta": 0.475}, "hy": {"delta": 0.105}, "pd": {"delta": 0.021}, "pa": {"delta": 0.09}, "pf": {"delta": 0.079}, "sc": {"delta": 0.21}, "ma": {"delta": 0.048}, "si": {"delta": 0.335}, "l": {"delta": -0.0123333335}}}
eval-results/results_google-gemma-7b.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "google/gemma-7b", "model_sha": "main"}, "results": {"hs": {"delta": -0.033}, "d": {"delta": 0.42}, "hy": {"delta": 0.06}, "pd": {"delta": 0.015}, "pa": {"delta": 0.123}, "pf": {"delta": 0.208}, "sc": {"delta": 0.164}, "ma": {"delta": 0.18}, "si": {"delta": 0.491}, "l": {"delta": -0.04222222}}}
eval-results/results_meta-llama-Llama-2-7b-chat-hf.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "meta-llama/Llama-2-7b-chat-hf", "model_sha": "main"}, "results": {"hs": {"delta": -0.037}, "d": {"delta": 0.059}, "hy": {"delta": -0.035}, "pd": {"delta": -0.014}, "pa": {"delta": 0.022}, "pf": {"delta": 0.122}, "sc": {"delta": 0.181}, "ma": {"delta": 0.19}, "si": {"delta": 0.132}, "l": {"delta": -0.047333334}}}
eval-results/results_meta-llama-Llama-2-7b-hf.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "meta-llama/Llama-2-7b-hf", "model_sha": "main"}, "results": {"hs": {"delta": -0.054}, "d": {"delta": 0.128}, "hy": {"delta": 0.02}, "pd": {"delta": 0.025}, "pa": {"delta": 0.088}, "pf": {"delta": 0.021}, "sc": {"delta": 0.078}, "ma": {"delta": 0.048}, "si": {"delta": 0.2}, "l": {"delta": 0.021000002}}}
eval-results/results_microsoft-Phi-3-mini-128k-instruct.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "microsoft/Phi-3-mini-128k-instruct", "model_sha": "main"}, "results": {"hs": {"delta": 0.043}, "d": {"delta": 0.659}, "hy": {"delta": 0.442}, "pd": {"delta": 0.157}, "pa": {"delta": 0.087}, "pf": {"delta": 0.204}, "sc": {"delta": 0.428}, "ma": {"delta": 0.042}, "si": {"delta": 0.476}, "l": {"delta": 0.018777778}}}
eval-results/results_mistralai-Mistral-7B-Instruct-v0-1.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "mistralai/Mistral-7B-Instruct-v0.1", "model_sha": "main"}, "results": {"hs": {"delta": -0.032}, "d": {"delta": 0.442}, "hy": {"delta": -0.025}, "pd": {"delta": 0.02}, "pa": {"delta": -0.065}, "pf": {"delta": 0.198}, "sc": {"delta": 0.325}, "ma": {"delta": 0.039}, "si": {"delta": 0.563}, "l": {"delta": -0.038999997}}}
eval-results/results_mistralai-Mistral-7B-Instruct-v0-2.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "mistralai/Mistral-7B-Instruct-v0.2", "model_sha": "main"}, "results": {"hs": {"delta": 0.149}, "d": {"delta": 0.474}, "hy": {"delta": 0.193}, "pd": {"delta": -0.014}, "pa": {"delta": 0.103}, "pf": {"delta": 0.323}, "sc": {"delta": 0.474}, "ma": {"delta": 0.132}, "si": {"delta": 0.405}, "l": {"delta": -0.004666667}}}
eval-results/results_mistralai-Mistral-7B-Instruct-v0-3.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "mistralai/Mistral-7B-Instruct-v0.3", "model_sha": "main"}, "results": {"hs": {"delta": 0.113}, "d": {"delta": 0.312}, "hy": {"delta": 0.044}, "pd": {"delta": -0.006}, "pa": {"delta": 0.042}, "pf": {"delta": 0.251}, "sc": {"delta": 0.287}, "ma": {"delta": 0.142}, "si": {"delta": 0.489}, "l": {"delta": -0.025666667}}}
eval-results/results_mistralai-Mistral-7B-v0-3.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "mistralai/Mistral-7B-v0.3", "model_sha": "main"}, "results": {"hs": {"delta": -0.073}, "d": {"delta": 0.44}, "hy": {"delta": -0.214}, "pd": {"delta": -0.013}, "pa": {"delta": 0.067}, "pf": {"delta": 0.16}, "sc": {"delta": 0.252}, "ma": {"delta": -0.019}, "si": {"delta": 0.449}, "l": {"delta": -0.011444445}}}
eval-results/results_mistralai-Mixtral-8x7B-Instruct-v0-1.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "mistralai/Mixtral-8x7B-Instruct-v0.1", "model_sha": "main"}, "results": {"hs": {"delta": 0.238}, "d": {"delta": 0.539}, "hy": {"delta": 0.242}, "pd": {"delta": 0.114}, "pa": {"delta": 0.242}, "pf": {"delta": 0.49}, "sc": {"delta": 0.395}, "ma": {"delta": 0.076}, "si": {"delta": 0.388}, "l": {"delta": 0.0125555545}}}
eval-results/results_mistralai-Mixtral-8x7B-v0-1.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "mistralai/Mixtral-8x7B-v0.1", "model_sha": "main"}, "results": {"hs": {"delta": 0.014}, "d": {"delta": 0.379}, "hy": {"delta": 0.022}, "pd": {"delta": 0.045}, "pa": {"delta": -0.023}, "pf": {"delta": 0.342}, "sc": {"delta": 0.42}, "ma": {"delta": 0.08}, "si": {"delta": 0.384}, "l": {"delta": -0.06111111}}}
eval-results/results_tiiuae-falcon-7b-instruct.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "tiiuae/falcon-7b-instruct", "model_sha": "main"}, "results": {"hs": {"delta": 0.032}, "d": {"delta": 0.131}, "hy": {"delta": -0.028}, "pd": {"delta": 0.003}, "pa": {"delta": 0.042}, "pf": {"delta": -0.017}, "sc": {"delta": 0.041}, "ma": {"delta": 0.019}, "si": {"delta": 0.145}, "l": {"delta": 0.002888889}}}
eval-results/results_tiiuae-falcon-7b.json ADDED
@@ -0,0 +1 @@
+ {"config": {"model_dtype": "float16", "model_name": "tiiuae/falcon-7b", "model_sha": "main"}, "results": {"hs": {"delta": -0.135}, "d": {"delta": 0.145}, "hy": {"delta": -0.356}, "pd": {"delta": -0.07}, "pa": {"delta": 0.043}, "pf": {"delta": -0.027}, "sc": {"delta": 0.167}, "ma": {"delta": 0.08}, "si": {"delta": 0.236}, "l": {"delta": -0.063777775}}}
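Each results file above has the same shape: a config block identifying the model and a results block with one signed delta per task key (hs, d, hy, pd, pa, pf, sc, ma, si, l). A minimal, illustrative sketch of flattening one file into a leaderboard-style row, mirroring the sum-of-deltas "Overall" score introduced in src/leaderboard/read_evals.py below (a standalone script, not part of the Space code):

import json

# Any of the files added in this commit works here.
with open("eval-results/results_google-gemma-7b-it.json", "r") as f:
    data = json.load(f)

row = {"Model": data["config"]["model_name"], "Precision": data["config"]["model_dtype"]}
row.update({task: scores["delta"] for task, scores in data["results"].items()})
row["Overall"] = sum(scores["delta"] for scores in data["results"].values())  # sum of per-task deltas
print(row)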
src/display/utils.py CHANGED
@@ -26,7 +26,7 @@ auto_eval_column_dict = []
  auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
  auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
  #Scores
- auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+ auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Overall ⬆️", "number", True)])
  for task in Tasks:
  auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
  # Model information
src/envs.py CHANGED
@@ -4,22 +4,19 @@ from huggingface_hub import HfApi

  # Info to change for your repository
  # ----------------------------------
- TOKEN = os.environ.get("TOKEN") # A read/write token for your org
+ TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org

- OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
+ OWNER = "IrinaAbdullaeva" # Change to your org - don't forget to create a results and request dataset, with the correct format!
  # ----------------------------------

- REPO_ID = f"{OWNER}/leaderboard"
- QUEUE_REPO = f"{OWNER}/requests"
- RESULTS_REPO = f"{OWNER}/results"
+ REPO_ID = f"{OWNER}/MindShift"
+ # RESULTS_REPO = f"{OWNER}/results"

  # If you setup a cache later, just change HF_HOME
- CACHE_PATH=os.getenv("HF_HOME", ".")
+ CACHE_PATH = os.getenv("HF_HOME", ".")

  # Local caches
- EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
  EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
- EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
  EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

  API = HfApi(token=TOKEN)
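src/envs.py now reads the token from the HF_TOKEN environment variable (typically a Space secret) and points REPO_ID at IrinaAbdullaeva/MindShift. A quick, hedged sketch for checking that the token is picked up before HfApi is used (not part of the repo):

import os
from huggingface_hub import HfApi

token = os.environ.get("HF_TOKEN")  # None when the secret is not set
api = HfApi(token=token)
print(api.whoami()["name"] if token else "HF_TOKEN not set, API calls will be unauthenticated")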
src/leaderboard/read_evals.py CHANGED
@@ -16,20 +16,20 @@ from src.submission.check_validity import is_model_on_hub
  class EvalResult:
  """Represents one full evaluation. Built from a combination of the result and request file for a given run.
  """
- eval_name: str # org_model_precision (uid)
- full_model: str # org/model (path on hub)
+ eval_name: str # org_model_precision (uid)
+ full_model: str # org/model (path on hub)
  org: str
  model: str
- revision: str # commit hash, "" if main
+ revision: str # commit hash, "" if main
  results: dict
  precision: Precision = Precision.Unknown
- model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
- weight_type: WeightType = WeightType.Original # Original or Adapter
+ model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
+ weight_type: WeightType = WeightType.Original # Original or Adapter
  architecture: str = "Unknown"
  license: str = "?"
  likes: int = 0
  num_params: int = 0
- date: str = "" # submission date of request file
+ date: str = "" # submission date of request file
  still_on_hub: bool = False

  @classmethod
@@ -76,7 +76,7 @@ class EvalResult:
  if accs.size == 0 or any([acc is None for acc in accs]):
  continue

- mean_acc = np.mean(accs) * 100.0
+ mean_acc = np.sum(accs)
  results[task.benchmark] = mean_acc

  return self(
@@ -86,30 +86,14 @@ class EvalResult:
  model=model,
  results=results,
  precision=precision,
- revision= config.get("model_sha", ""),
+ revision=config.get("model_sha", ""),
  still_on_hub=still_on_hub,
  architecture=architecture
  )

- def update_with_request_file(self, requests_path):
- """Finds the relevant request file for the current model and updates info with it"""
- request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
-
- try:
- with open(request_file, "r") as f:
- request = json.load(f)
- self.model_type = ModelType.from_str(request.get("model_type", ""))
- self.weight_type = WeightType[request.get("weight_type", "Original")]
- self.license = request.get("license", "?")
- self.likes = request.get("likes", 0)
- self.num_params = request.get("params", 0)
- self.date = request.get("submitted_time", "")
- except Exception:
- print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
-
  def to_dict(self):
  """Converts the Eval Result to a dict compatible with our dataframe display"""
- average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
+ overall = sum([v for v in self.results.values() if v is not None])
  data_dict = {
  "eval_name": self.eval_name, # not a column, just a save name,
  AutoEvalColumn.precision.name: self.precision.value.name,
@@ -119,7 +103,7 @@ class EvalResult:
  AutoEvalColumn.architecture.name: self.architecture,
  AutoEvalColumn.model.name: make_clickable_model(self.full_model),
  AutoEvalColumn.revision.name: self.revision,
- AutoEvalColumn.average.name: average,
+ AutoEvalColumn.average.name: overall,
  AutoEvalColumn.license.name: self.license,
  AutoEvalColumn.likes.name: self.likes,
  AutoEvalColumn.params.name: self.num_params,
@@ -132,29 +116,8 @@ class EvalResult:
  return data_dict


- def get_request_file_for_model(requests_path, model_name, precision):
- """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
- request_files = os.path.join(
- requests_path,
- f"{model_name}_eval_request_*.json",
- )
- request_files = glob.glob(request_files)
-
- # Select correct request file (precision)
- request_file = ""
- request_files = sorted(request_files, reverse=True)
- for tmp_request_file in request_files:
- with open(tmp_request_file, "r") as f:
- req_content = json.load(f)
- if (
- req_content["status"] in ["FINISHED"]
- and req_content["precision"] == precision.split(".")[-1]
- ):
- request_file = tmp_request_file
- return request_file
-
-
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
+
+ def get_raw_eval_results(results_path: str) -> list[EvalResult]:
  """From the path of the results folder root, extract all needed info for results"""
  model_result_filepaths = []

@@ -163,20 +126,20 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
  if len(files) == 0 or any([not f.endswith(".json") for f in files]):
  continue

- # Sort the files by date
+ # Sort the files by date: old -> new
  try:
- files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
+ files = sorted([os.path.join(root, file) for file in files], key=os.path.getmtime, reverse=False)
+ # files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
  except dateutil.parser._parser.ParserError:
  files = [files[-1]]

  for file in files:
- model_result_filepaths.append(os.path.join(root, file))
+ model_result_filepaths.append(file)

  eval_results = {}
  for model_result_filepath in model_result_filepaths:
  # Creation of result
  eval_result = EvalResult.init_from_json_file(model_result_filepath)
- eval_result.update_with_request_file(requests_path)

  # Store results of same eval together
  eval_name = eval_result.eval_name
@@ -188,7 +151,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
  results = []
  for v in eval_results.values():
  try:
- v.to_dict() # we test if the dict version is complete
+ v.to_dict() # we test if the dict version is complete
  results.append(v)
  except KeyError: # not all eval values present
  continue
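The filename-based date sort in get_raw_eval_results is replaced by modification-time ordering, so the newest result file for a model is processed last. A small self-contained sketch of that ordering (the directory path is illustrative):

import os

results_dir = "eval-results"  # illustrative local path
files = [os.path.join(results_dir, f) for f in os.listdir(results_dir) if f.endswith(".json")]
files = sorted(files, key=os.path.getmtime, reverse=False)  # oldest first, newest last
for path in files:
    print(os.path.getmtime(path), path)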
src/populate.py CHANGED
@@ -8,9 +8,9 @@ from src.display.utils import AutoEvalColumn, EvalQueueColumn
  from src.leaderboard.read_evals import get_raw_eval_results


- def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
+ def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
  """Creates a dataframe from all the individual experiment results"""
- raw_data = get_raw_eval_results(results_path, requests_path)
+ raw_data = get_raw_eval_results(results_path)
  all_data_json = [v.to_dict() for v in raw_data]

  df = pd.DataFrame.from_records(all_data_json)
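With the request-file dependency removed, the leaderboard dataframe is built from the results folder alone. A hedged end-to-end sketch of the new call path; it assumes COLS, BENCHMARK_COLS and AutoEvalColumn are exported by src.display.utils as in the stock leaderboard template, and that get_leaderboard_df returns the raw data plus the dataframe, as app.py expects:

from src.display.utils import AutoEvalColumn, BENCHMARK_COLS, COLS
from src.envs import EVAL_RESULTS_PATH
from src.populate import get_leaderboard_df

raw_data, df = get_leaderboard_df(EVAL_RESULTS_PATH, COLS, BENCHMARK_COLS)
# AutoEvalColumn.average.name is the "Overall ⬆️" column renamed in src/display/utils.py
print(df.sort_values(by=[AutoEvalColumn.average.name], ascending=False).head())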