hi-melnikov committed on
Commit
3597960
1 Parent(s): 1bd2e0f

Update src/leaderboard/build_leaderboard.py

Browse files
src/leaderboard/build_leaderboard.py CHANGED
@@ -61,7 +61,7 @@ def download_openbench():
61
  print("\nInternal models in openbench-eval:")
62
  subprocess.run(["ls", f"{DATA_ARENA_PATH}/model_answers/internal/"], check=False)
63
 
64
- print("\External models in openbench-eval:")
65
  subprocess.run(["ls", f"{DATA_ARENA_PATH}/model_answers/external/"], check=False)
66
 
67
  print("\nJudgement in openbench-eval")
@@ -70,6 +70,6 @@ def download_openbench():
70
 
71
  def build_leadearboard_df():
72
  # Retrieve the leaderboard DataFrame
73
- with open(f"{HF_HOME}/data/leaderboard.jsong", "r", encoding="utf-8") as eval_file:
74
  leaderboard_df = pd.DataFrame.from_records(json.load(eval_file))
75
  return leaderboard_df.copy()
 
61
  print("\nInternal models in openbench-eval:")
62
  subprocess.run(["ls", f"{DATA_ARENA_PATH}/model_answers/internal/"], check=False)
63
 
64
+ print("\nExternal models in openbench-eval:")
65
  subprocess.run(["ls", f"{DATA_ARENA_PATH}/model_answers/external/"], check=False)
66
 
67
  print("\nJudgement in openbench-eval")
 
70
 
71
  def build_leadearboard_df():
72
  # Retrieve the leaderboard DataFrame
73
+ with open(f"{HF_HOME}/data/leaderboard.json", "r", encoding="utf-8") as eval_file:
74
  leaderboard_df = pd.DataFrame.from_records(json.load(eval_file))
75
  return leaderboard_df.copy()