Omartificial-Intelligence-Space committed
Commit bdb77ab • 1 Parent(s): 1193d13
update populate
Files changed: src/populate.py (+13 -4)
src/populate.py CHANGED

@@ -4,7 +4,8 @@ import os
 import pandas as pd
 import json
 
-from src.display.utils import COLUMNS, EVAL_COLS
+from src.display.utils import COLUMNS, EVAL_COLS, Tasks
+from src.envs import EVAL_RESULTS_PATH  # Removed FIXED_QUESTIONS_FILE import
 
 def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_cols):
     # Initialize an empty DataFrame
@@ -12,7 +13,11 @@ def get_leaderboard_df(eval_results_path, eval_requests_path, cols, benchmark_co
 
     # Load evaluation results from JSON files
     if os.path.exists(eval_results_path):
-        result_files = [
+        result_files = [
+            os.path.join(eval_results_path, f)
+            for f in os.listdir(eval_results_path)
+            if f.endswith('.json')
+        ]
         data_list = []
         for file in result_files:
             with open(file, 'r') as f:
@@ -44,7 +49,11 @@ def get_evaluation_queue_df(eval_requests_path, eval_cols):
 
     # Load evaluation requests from JSON files
     if os.path.exists(eval_requests_path):
-        request_files = [
+        request_files = [
+            os.path.join(eval_requests_path, f)
+            for f in os.listdir(eval_requests_path)
+            if f.endswith('.json')
+        ]
         data_list = []
         for file in request_files:
             with open(file, 'r') as f:
@@ -57,4 +66,4 @@ def get_evaluation_queue_df(eval_requests_path, eval_cols):
     running_df = df[df['status'] == 'running']
     pending_df = df[df['status'] == 'pending']
 
-    return finished_df, running_df, pending_df
+    return finished_df, running_df, pending_df
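
For context, the change spells out the file discovery as an explicit list comprehension over the results and requests directories. The snippet below is a minimal standalone sketch of that pattern, not the Space's actual code: the directory name eval-results is a placeholder (the real value comes from src.envs.EVAL_RESULTS_PATH), and it only lists the JSON files that the updated get_leaderboard_df would go on to load.

import os

# Placeholder path for illustration only; the Space resolves this from src.envs.
eval_results_path = "eval-results"

# Same discovery pattern introduced in this commit: every *.json file sitting
# directly inside the results directory becomes a candidate result file.
if os.path.exists(eval_results_path):
    result_files = [
        os.path.join(eval_results_path, f)
        for f in os.listdir(eval_results_path)
        if f.endswith(".json")
    ]
    print(f"{len(result_files)} result file(s) found:")
    for path in result_files:
        print(" -", path)
else:
    print(f"No directory named {eval_results_path!r} here; nothing to load.")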