Fabian Wolf committed · 0a7c75d
Parent(s): 7600afd

change metrics, add model params

Files changed:
- src/about.py (+2, -2)
- src/leaderboard/read_evals.py (+38, -36)
src/about.py CHANGED
@@ -12,8 +12,8 @@ class Task:
 # ---------------------------------------------------
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("
-    task1 = Task("
+    task0 = Task("metric1", "acc", "Metric1")
+    task1 = Task("metric2", "acc_norm", "Metric2")
 
 NUM_FEWSHOT = 0 # Change with your few shot
 # ---------------------------------------------------
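For reference, a minimal sketch of how these Tasks entries are typically consumed; the Task dataclass shown here follows the standard Hugging Face leaderboard template this file is derived from and is an assumption, not part of this commit:

from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str  # task key in the results JSON ("metric1", "metric2")
    metric: str     # metric key inside that task ("acc", "acc_norm")
    col_name: str   # column name displayed in the leaderboard ("Metric1", "Metric2")

class Tasks(Enum):
    task0 = Task("metric1", "acc", "Metric1")
    task1 = Task("metric2", "acc_norm", "Metric2")

With this definition, a results file is expected to carry scores such as {"results": {"metric1": {"acc": 0.71}, "metric2": {"acc_norm": 0.64}}} (values illustrative).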
src/leaderboard/read_evals.py CHANGED
@@ -43,6 +43,8 @@ class EvalResult:
         # Precision
         precision = Precision.from_str(config.get("model_dtype"))
 
+        self.num_params = config.get("model_params", 0)
+
         # Get model and org
         org_and_model = config.get("model_name", config.get("model_args", None))
         org_and_model = org_and_model.split("/", 1)
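A hedged sketch (as a Python dict) of the results-file config block this change assumes; only "model_dtype", "model_name" and the new "model_params" key are read here, everything else is illustrative:

example_result = {
    "config": {
        "model_dtype": "torch.float16",   # parsed by Precision.from_str
        "model_name": "org/model",        # split into org and model below
        "model_params": 7.24,             # new: model parameter count, falls back to 0 when missing
    },
    "results": {
        "metric1": {"acc": 0.71},         # illustrative scores matching src/about.py
        "metric2": {"acc_norm": 0.64},
    },
}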
@@ -91,21 +93,21 @@ class EvalResult:
             architecture=architecture
         )
 
-    def update_with_request_file(self, requests_path):
-        """Finds the relevant request file for the current model and updates info with it"""
-        request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
-
-        try:
-            with open(request_file, "r") as f:
-                request = json.load(f)
-            self.model_type = ModelType.from_str(request.get("model_type", ""))
-            self.weight_type = WeightType[request.get("weight_type", "Original")]
-            self.license = request.get("license", "?")
-            self.likes = request.get("likes", 0)
-            self.num_params = request.get("params", 0)
-            self.date = request.get("submitted_time", "")
-        except Exception:
-            print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
+    # def update_with_request_file(self, requests_path):
+    #     """Finds the relevant request file for the current model and updates info with it"""
+    #     request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
+
+    #     try:
+    #         with open(request_file, "r") as f:
+    #             request = json.load(f)
+    #         self.model_type = ModelType.from_str(request.get("model_type", ""))
+    #         self.weight_type = WeightType[request.get("weight_type", "Original")]
+    #         self.license = request.get("license", "?")
+    #         self.likes = request.get("likes", 0)
+    #         self.num_params = request.get("params", 0)
+    #         self.date = request.get("submitted_time", "")
+    #     except Exception:
+    #         print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
 
     def to_dict(self):
         """Converts the Eval Result to a dict compatible with our dataframe display"""
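For context, a minimal sketch of the request-file fields this now-disabled path used to read; the field names come from the commented-out code above, the values are illustrative:

example_request = {
    "model_type": "pretrained",                # parsed via ModelType.from_str
    "weight_type": "Original",                 # looked up in WeightType
    "license": "apache-2.0",
    "likes": 0,
    "params": 7.24,                            # previously the source of num_params
    "submitted_time": "2024-01-01T00:00:00Z",
    "status": "FINISHED",                      # filtered on by get_request_file_for_model (disabled below)
    "precision": "float16",                    # matched against the run's precision
}

After this commit, num_params is taken from the results file ("model_params" in its config) rather than from the request file.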
@@ -132,26 +134,26 @@ class EvalResult:
         return data_dict
 
 
-def get_request_file_for_model(requests_path, model_name, precision):
-    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
-    request_files = os.path.join(
-        requests_path,
-        f"{model_name}_eval_request_*.json",
-    )
-    request_files = glob.glob(request_files)
-
-    # Select correct request file (precision)
-    request_file = ""
-    request_files = sorted(request_files, reverse=True)
-    for tmp_request_file in request_files:
-        with open(tmp_request_file, "r") as f:
-            req_content = json.load(f)
-            if (
-                req_content["status"] in ["FINISHED"]
-                and req_content["precision"] == precision.split(".")[-1]
-            ):
-                request_file = tmp_request_file
-    return request_file
+# def get_request_file_for_model(requests_path, model_name, precision):
+#     """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
+#     request_files = os.path.join(
+#         requests_path,
+#         f"{model_name}_eval_request_*.json",
+#     )
+#     request_files = glob.glob(request_files)
+
+#     # Select correct request file (precision)
+#     request_file = ""
+#     request_files = sorted(request_files, reverse=True)
+#     for tmp_request_file in request_files:
+#         with open(tmp_request_file, "r") as f:
+#             req_content = json.load(f)
+#             if (
+#                 req_content["status"] in ["FINISHED"]
+#                 and req_content["precision"] == precision.split(".")[-1]
+#             ):
+#                 request_file = tmp_request_file
+#     return request_file
 
 
 def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
@@ -177,7 +179,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
         # Creation of result
         print("Parsing", model_result_filepath)
         eval_result = EvalResult.init_from_json_file(model_result_filepath)
-        eval_result.update_with_request_file(requests_path)
+        # eval_result.update_with_request_file(requests_path)
 
         # Store results of same eval together
         eval_name = eval_result.eval_name
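Taken together, a hedged sketch of how results are now loaded; the import path and directory names are illustrative, and requests_path is still accepted but no longer consulted:

from src.leaderboard.read_evals import get_raw_eval_results

# Each results JSON under results_path is parsed into an EvalResult purely from its own
# config/results blocks; the eval-queue request files are ignored after this commit.
results = get_raw_eval_results(results_path="eval-results", requests_path="eval-queue")
for res in results:
    print(res.eval_name, res.num_params)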