import glob
import json
import math
import os
from dataclasses import dataclass

import dateutil.parser
import numpy as np

from src.display.formatting import make_clickable_model
from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
from src.submission.check_validity import is_model_on_hub


@dataclass
class EvalResult:
    """Represents one full evaluation. Built from a combination of the result and request file for a given run."""

    eval_name: str
    org: str
    generative_model: str
    retrieval_model: str
    results: dict
    generative_model_link: str = ""
    generative_model_args: dict = None
    retrieval_model_link: str = ""
    retrieval_model_args: dict = None
    precision: Precision = Precision.Unknown
    model_type: ModelType = ModelType.Unknown
    weight_type: WeightType = WeightType.Original
    architecture: str = "Unknown"
    license: str = "?"
    likes: int = 0
    date: str = ""
    still_on_hub: bool = False

    @classmethod
    def init_from_json_file(cls, json_filepath):
        """Inits the result from the specific model result file"""
        with open(json_filepath) as fp:
            data = json.load(fp)

        config = data.get("config")

        eval_name = config.get("eval_name", "")
        generative_model = config.get("generative_model", "")
        retrieval_model = config.get("retrieval_model", "")
        org = config.get("org", "")

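        # Aggregate the per-task scores reported in the result file. Each task keeps
        # both its benchmark average and its headline metric, scaled to percentages.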
        results = {}
        for task in Tasks:
            task = task.value

            accs = np.array([
                v.get(task.metric, None)
                for k, v in data["results"].items()
                if task.benchmark == k and task.col_name != "hallucination"
            ])
            if accs.size == 0 or any(acc is None for acc in accs):
                continue

            mean_acc = np.mean(accs) * 100.0
            results[task.benchmark] = mean_acc
            results[task.metric] = data["results"][task.benchmark].get(task.metric, None)
            if results[task.metric] is not None:
                results[task.metric] = results[task.metric] * 100.0

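        # A submission is treated as open source only if both the generative and the
        # retrieval model report open_source=True in their respective args.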
        generative_model_args = config.get("generative_model_args", None)
        retrieval_model_args = config.get("retrieval_model_args", None)

        open_source = True
        if not generative_model_args or not generative_model_args.get("open_source", False):
            open_source = False
        if not retrieval_model_args or not retrieval_model_args.get("open_source", False):
            open_source = False

        return cls(
            eval_name=eval_name,
            org=org,
            generative_model=generative_model,
            retrieval_model=retrieval_model,
            results=results,
            generative_model_args=generative_model_args,
            retrieval_model_args=retrieval_model_args,
            model_type=ModelType.OpenSource if open_source else ModelType.ClosedSource,
        )

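    # Metadata such as model type, weight type, license, likes, parameter count and
    # submission date comes from the matching request file, not the result file.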
    def update_with_request_file(self, requests_path):
        """Finds the relevant request file for the current model and updates info with it"""
        request_file = get_request_file_for_model(requests_path, self.eval_name, self.precision.value.name)

        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            self.model_type = ModelType.from_str(request.get("model_type", ""))
            self.weight_type = WeightType[request.get("weight_type", "Original")]
            self.license = request.get("license", "?")
            self.likes = request.get("likes", 0)
            self.num_params = request.get("params", 0)
            self.date = request.get("submitted_time", "")
        except Exception:
            print(f"Could not find request file for {self.org}/{self.eval_name} with precision {self.precision.value.name}")

    def to_dict(self):
        """Converts the Eval Result to a dict compatible with our dataframe display"""
        data_dict = {
            "eval_name": self.eval_name,
            AutoEvalColumn.model_type.name: self.model_type.value.name,
            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
            AutoEvalColumn.generative_model.name: self.generative_model,
            AutoEvalColumn.retrieval_model.name: self.retrieval_model,
            AutoEvalColumn.generative_model_link.name: make_clickable_model(self.generative_model, self.generative_model_link),
            AutoEvalColumn.retrieval_model_link.name: make_clickable_model(self.retrieval_model, self.retrieval_model_link),
            AutoEvalColumn.ret_average.name: self.results["retrieval"],
            AutoEvalColumn.gen_average.name: self.results["generation"],
            "Gen#Params (B)": (self.generative_model_args or {}).get("num_params", "Unknown"),
            "Ret#Params (B)": (self.retrieval_model_args or {}).get("num_params", "Unknown"),
        }

        for task in Tasks:
            data_dict[task.value.col_name] = self.results[task.value.metric]

        return data_dict


def get_request_file_for_model(requests_path, model_name, precision):
    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
    request_files = os.path.join(
        requests_path,
        f"{model_name}_eval_request_*.json",
    )
    request_files = glob.glob(request_files)

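    # Iterate newest-first and keep the first FINISHED request whose precision matches.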
request_file = "" |
|
request_files = sorted(request_files, reverse=True) |
|
for tmp_request_file in request_files: |
|
with open(tmp_request_file, "r") as f: |
|
req_content = json.load(f) |
|
if ( |
|
req_content["status"] in ["FINISHED"] |
|
and req_content["precision"] == precision.split(".")[-1] |
|
): |
|
request_file = tmp_request_file |
|
return request_file |
|
|
|
|
|
def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []
    print(f"Reading results from {results_path}")
    for root, _, files in os.walk(results_path):
        # Skip folders that are empty or contain anything other than JSON result files.
        if len(files) == 0 or any(not f.endswith(".json") for f in files):
            continue

        # Sort the result files by the timestamp embedded in their name so that the
        # most recent results are read last.
        try:
            files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
        except dateutil.parser._parser.ParserError:
            files = [files[-1]]

        for file in files:
            model_result_filepaths.append(os.path.join(root, file))
            print(f"Adding {file}")

    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        eval_result = EvalResult.init_from_json_file(model_result_filepath)

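        # Merge files that belong to the same eval run, keeping only non-null scores.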
        eval_name = eval_result.eval_name
        if eval_name in eval_results:
            eval_results[eval_name].results.update(
                {k: v for k, v in eval_result.results.items() if v is not None}
            )
        else:
            eval_results[eval_name] = eval_result

    results = []
    for v in eval_results.values():
        try:
            v.to_dict()  # check that the dict version is complete before keeping the result
            results.append(v)
        except KeyError:
            import traceback

            traceback.print_exc()
            continue

    return results
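
# A minimal usage sketch (hypothetical paths, for illustration only; adjust them to
# wherever the results and requests folders actually live):
#
#     raw_results = get_raw_eval_results("./eval-results", "./eval-requests")
#     rows = [result.to_dict() for result in raw_results]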