"""Parses model result and request files into EvalResult objects for the leaderboard display."""

import json
import math
import os
from dataclasses import dataclass

import numpy as np

import src.display.formatting as formatting
import src.display.utils as utils
import src.submission.check_validity as check_validity

@dataclass
class EvalResult:
    """Represents one full evaluation, built from the result and request files of a given run."""

    eval_name: str  # org_model_precision (uid)
    full_model: str  # org/model (path on hub)
    org: str
    model: str
    revision: str  # commit hash, "" if main
    results: dict
    precision: utils.Precision = utils.Precision.Unknown
    model_type: utils.ModelType = utils.ModelType.Unknown  # Pretrained, fine-tuned, ...
    weight_type: utils.WeightType = utils.WeightType.Original  # Original or Adapter
    architecture: str = "Unknown"
    license: str = "?"
    likes: int = 0
    num_params: int = 0
    date: str = ""  # submission date of the request file
    still_on_hub: bool = False
    @classmethod
    def init_from_json_file(cls, json_filepath):
        """Inits the result from the given model result file."""
        with open(json_filepath) as fp:
            data = json.load(fp)

        config = data.get("config")

        # Precision
        precision = utils.Precision.from_str(config.get("model_dtype"))

        # Get model and org
        full_model = config.get("model_name", config.get("model_args", None))
        org, model = full_model.split("/", 1) if "/" in full_model else (None, full_model)
        if org:
            result_key = f"{org}_{model}_{precision.value.name}"
        else:
            result_key = f"{model}_{precision.value.name}"

        still_on_hub, _, model_config = check_validity.is_model_on_hub(
            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
        )
        if model_config is not None:
            architecture = ";".join(getattr(model_config, "architectures", ["?"]) or ["?"])
        else:
            architecture = "?"
        # Extract results available in this file (some results are split across several files)
        results = {}
        for task in utils.Tasks:
            task = task.value
            if isinstance(task.metric, str):
                # Collect every score reported for this benchmark, skipping
                # NaN and non-numeric values, and convert to percentages.
                accs = []
                for k, v in data["results"].items():
                    if task.benchmark != k:
                        continue
                    if isinstance(v, (int, float)) and not math.isnan(v):
                        accs.append(np.around(v * 100, decimals=1))
                    elif isinstance(v, list):
                        accs.extend(
                            np.around(x * 100, decimals=1)
                            for x in v
                            if isinstance(x, (int, float)) and not math.isnan(x)
                        )
                results[task.benchmark] = np.array(accs)
            elif isinstance(task.metric, list):
                # A list is not hashable, so it cannot be used as a dict key
                # directly; look up each metric name in the list individually.
                accs = np.array(
                    [
                        str(v.get(metric, None))
                        for metric in task.metric
                        for k, v in data["results"].items()
                        if task.benchmark == k and isinstance(v, dict)
                    ]
                )
                results[task.benchmark] = accs
            else:
                print(f"Skipping task with unhandled metric type: {type(task.metric)}")
        return cls(
            eval_name=result_key,
            full_model=full_model,
            org=org,
            model=model,
            results=results,
            precision=precision,
            revision=config.get("model_sha", ""),
            still_on_hub=still_on_hub,
            architecture=architecture,
        )
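
    # For reference, init_from_json_file assumes a result file shaped roughly
    # like the sketch below (field names inferred from the reads above; the
    # exact schema is an assumption, not something this file pins down):
    #
    # {
    #     "config": {
    #         "model_dtype": "torch.float16",
    #         "model_name": "org/model",
    #         "model_sha": "<commit hash>"
    #     },
    #     "results": {"<benchmark>": <score, or list of scores>, ...}
    # }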

    def update_with_request_file(self, requests_path):
        """Finds the relevant request file for the current model and updates info with it."""
        request_file = get_request_file_for_model(requests_path, self.full_model)
        try:
            with open(request_file, "r") as f:
                request = json.load(f)
            self.model_type = utils.ModelType.from_str(request.get("model_type", ""))
            self.license = request.get("license", "?")
            self.likes = request.get("likes", 0)
            # "params" is stored as a string such as "7B"; strip the suffix
            # before converting. str() guards against plain numeric values.
            self.num_params = int(float(str(request.get("params", "0")).replace("B", "")))
            self.date = request.get("submitted_time", "")
        except FileNotFoundError:
            print(f"Could not find request file for {self.org}/{self.model}")
        except json.JSONDecodeError:
            print(f"Error decoding JSON in request file for {self.org}/{self.model}")

    def to_dict(self):
        """Converts the EvalResult to a dict compatible with our dataframe display"""
        data_dict = {
            "eval_name": self.eval_name,  # not a column, just a save name
            utils.AutoEvalColumn.model.name: formatting.make_clickable_model(self.full_model),
            utils.AutoEvalColumn.dummy.name: self.full_model,
            utils.AutoEvalColumn.license.name: self.license,
            utils.AutoEvalColumn.likes.name: self.likes,
            utils.AutoEvalColumn.params.name: self.num_params,
        }
        # Columns such as precision, model type, weight type, architecture,
        # revision, and still_on_hub are intentionally left out of the display.
        for task in utils.Tasks:
            data_dict[task.value.col_name] = self.results[task.value.benchmark]

        return data_dict


def get_request_file_for_model(requests_path, model_name):
    """Returns the expected request file path for a given model.

    Note: earlier versions globbed over candidate files and filtered on status
    FINISHED and precision; request files are now keyed directly by model
    name, so a single path is constructed instead.
    """
    return os.path.join(requests_path, f"{model_name}.json")


def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
    """From the path of the results folder root, extract all needed info for results"""
    model_result_filepaths = []
    for root, _, files in os.walk(results_path):
        for f in files:
            if f.endswith(".json"):
                model_result_filepaths.append(os.path.join(root, f))

    eval_results = {}
    for model_result_filepath in model_result_filepaths:
        # Creation of result
        eval_result = EvalResult.init_from_json_file(model_result_filepath)
        eval_result.update_with_request_file(requests_path)

        # Store results of the same eval together
        eval_name = eval_result.eval_name
        if eval_name in eval_results:
            eval_results[eval_name].results.update(
                {k: v for k, v in eval_result.results.items() if v is not None}
            )
        else:
            eval_results[eval_name] = eval_result

    results = []
    for v in eval_results.values():
        try:
            v.to_dict()  # we test if the dict version is complete
            results.append(v)
        except KeyError:  # not all eval values present
            continue

    return results
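

if __name__ == "__main__":
    # Minimal usage sketch. "eval-results" and "eval-queue" are hypothetical
    # folder names, not paths defined by this script: result files are walked
    # recursively, merged with request metadata, and printed as display rows.
    for eval_result in get_raw_eval_results("eval-results", "eval-queue"):
        print(eval_result.to_dict())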