nan committed on
Commit a0c2cea
Parent: 2edd122

fix: fix bug in loading more data

Files changed (3)
  1. app.py +1 -1
  2. src/envs.py +1 -2
  3. src/leaderboard/read_evals.py +6 -2
app.py CHANGED
@@ -11,7 +11,7 @@ from src.about import (
 from src.display.css_html_js import custom_css
 from src.leaderboard.read_evals import get_raw_eval_results, get_leaderboard_df
 
-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, REPO_ID, RESULTS_REPO, TOKEN
+from src.envs import API, EVAL_RESULTS_PATH, REPO_ID, RESULTS_REPO, TOKEN
 from utils import update_table, update_metric, update_table_long_doc, upload_file, get_default_cols
 from src.benchmarks import DOMAIN_COLS_QA, LANG_COLS_QA, DOMAIN_COLS_LONG_DOC, LANG_COLS_LONG_DOC, metric_list
 
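The import change mirrors the src/envs.py change below: once EVAL_REQUESTS_PATH is deleted from that module, leaving it in this import would crash the app at startup with ImportError: cannot import name 'EVAL_REQUESTS_PATH' from 'src.envs'.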
src/envs.py CHANGED
@@ -16,7 +16,6 @@ RESULTS_REPO = f"{OWNER}/results"
 CACHE_PATH = os.getenv("HF_HOME", ".")
 
 # Local caches
-EVAL_REQUESTS_PATH = "/Users/nanwang/Codes/huggingface/nan/leaderboard/toys/toydata/requests" # os.path.join(CACHE_PATH, "eval-queue")
-EVAL_RESULTS_PATH = "/Users/nanwang/Codes/huggingface/nan/leaderboard/toys/toydata/results" #os.path.join(CACHE_PATH, "eval-results")
+EVAL_RESULTS_PATH = "/Users/nanwang/Codes/huggingface/nan/results/demo-leaderboard" #os.path.join(CACHE_PATH, "eval-results")
 
 API = HfApi(token=TOKEN)
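The new EVAL_RESULTS_PATH is still a hard-coded absolute path that only exists on the author's machine; the commented-out os.path.join(CACHE_PATH, "eval-results") is the portable form. A minimal sketch of that approach with a local-debug override (the EVAL_RESULTS_PATH_OVERRIDE variable is hypothetical, not part of this commit):

import os

# Cache root as defined earlier in src/envs.py.
CACHE_PATH = os.getenv("HF_HOME", ".")

# Portable default from the commented-out expression; the (hypothetical)
# override variable lets a developer point at local toy data without
# editing the source.
EVAL_RESULTS_PATH = os.getenv(
    "EVAL_RESULTS_PATH_OVERRIDE",
    os.path.join(CACHE_PATH, "eval-results"),
)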
src/leaderboard/read_evals.py CHANGED
@@ -92,7 +92,7 @@ class FullEvalResult:
             results[eval_result.eval_name][COL_NAME_RETRIEVAL_MODEL] = self.retrieval_model
             results[eval_result.eval_name][COL_NAME_RERANKING_MODEL] = self.reranking_model
 
-            print(f'result loaded: {eval_result.eval_name}')
+            # print(f'result loaded: {eval_result.eval_name}')
             for result in eval_result.results:
                 # add result for each domain, language, and dataset
                 domain = result["domain"]
@@ -127,7 +127,11 @@ def get_raw_eval_results(results_path: str) -> List[FullEvalResult]:
     eval_results = {}
     for model_result_filepath in model_result_filepaths:
         # create evaluation results
-        eval_result = FullEvalResult.init_from_json_file(model_result_filepath)
+        try:
+            eval_result = FullEvalResult.init_from_json_file(model_result_filepath)
+        except UnicodeDecodeError as e:
+            print(f"loading file failed. {model_result_filepath}")
+            continue
         print(f'file loaded: {model_result_filepath}')
         eval_name = eval_result.eval_name
         eval_results[eval_name] = eval_result
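The new try/except skips any results file that cannot be decoded instead of aborting the whole load, which is the "loading more data" fix in the commit message. A UnicodeDecodeError here usually means the file is being opened with the platform's default encoding; a sketch of an encoding-explicit loader, assuming init_from_json_file ultimately does a plain open() plus json.load() (its body is not shown in this diff, and the helper name below is hypothetical):

import json

def load_result_json(path: str) -> dict:
    # Opening with an explicit encoding avoids UnicodeDecodeError on
    # platforms where open() defaults to a non-UTF-8 locale encoding
    # (open() uses locale.getpreferredencoding() when none is given).
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)

Note that the committed handler binds the exception as e but never uses it; printing e alongside the path would make the offending file easier to diagnose.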