import json
import os

import pandas as pd

from src.display.formatting import has_no_nan_values, make_clickable_model
from src.display.utils import AutoEvalColumn, EvalQueueColumn
from src.leaderboard.read_evals import get_raw_eval_results


def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
    """Creates a dataframe from all the individual experiment results"""
    raw_data = get_raw_eval_results(results_path, requests_path)
    all_data_json = [v.to_dict() for v in raw_data]

    df = pd.DataFrame.from_records(all_data_json)
    print(f"DataFrame before NaN filtering: {df}")

    score_cols = [
        'ALT E to J BLEU', 'ALT J to E BLEU', 'WikiCorpus E to J BLEU', 'WikiCorpus J to E BLEU', 
        'XL-Sum JA BLEU', 'XL-Sum ROUGE1', 'XL-Sum ROUGE2', 'XL-Sum ROUGE-Lsum'
    ]
    print(f"Column names before creating DataFrame: {df.columns}")
    
    existing_score_cols = [col for col in score_cols if col in df.columns]
    print(f"Existing score columns: {existing_score_cols}")
    
    # Divide the score columns by 100 and format them as four-decimal strings
    print(f"Existing score columns before adjustment: {df[existing_score_cols]}")
    df[existing_score_cols] = (df[existing_score_cols] / 100).applymap(lambda x: f'{x:.4f}')
    df = df.sort_values(by=[AutoEvalColumn.AVG.name], ascending=False)
    df = df[cols].round(decimals=2)
    
    # filter out if any of the benchmarks have not been produced
    df = df[has_no_nan_values(df, benchmark_cols)]
    print(f"Final leaderboard DataFrame:\n{df.head()}")
    print(f"DataFrame shape: {df.shape}")
    
    return df
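
# A minimal usage sketch for get_leaderboard_df. The paths and column lists
# below are placeholders: in the actual app they come from its configuration
# (e.g. the populated AutoEvalColumn fields), not from this module.
#
#     cols = [AutoEvalColumn.model.name, AutoEvalColumn.AVG.name, "ALT E to J BLEU"]
#     benchmark_cols = ["ALT E to J BLEU"]
#     leaderboard_df = get_leaderboard_df("./eval-results", "./eval-requests", cols, benchmark_cols)
#     print(leaderboard_df.head())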



def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Creates the different dataframes for the evaluation queue requests"""
    entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
    all_evals = []

    for entry in entries:
        if ".json" in entry:
            file_path = os.path.join(save_path, entry)
            with open(file_path) as fp:
                data = json.load(fp)

            data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
            data[EvalQueueColumn.revision.name] = data.get("revision", "main")

            all_evals.append(data)
        elif ".md" not in entry:
            # this is a folder
            sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
            for sub_entry in sub_entries:
                file_path = os.path.join(save_path, entry, sub_entry)
                with open(file_path) as fp:
                    data = json.load(fp)

                data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
                data[EvalQueueColumn.revision.name] = data.get("revision", "main")
                all_evals.append(data)

    pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
    finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
    failed_list = [e for e in all_evals if e["status"] == "FAILED"]
    df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
    df_running = pd.DataFrame.from_records(running_list, columns=cols)
    df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
    df_failed = pd.DataFrame.from_records(failed_list, columns=cols)
    # Each frame was built with columns=cols, so no reindexing is needed here
    return df_finished, df_running, df_pending, df_failed
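
# A minimal usage sketch for get_evaluation_queue_df. The queue directory and
# the way the column names are derived are assumptions; the real values are
# defined by the app that imports this module (EvalQueueColumn is assumed to
# be a dataclass whose field names double as the queue column names).
#
#     from dataclasses import fields
#     eval_cols = [c.name for c in fields(EvalQueueColumn)]
#     finished, running, pending, failed = get_evaluation_queue_df("./eval-requests", eval_cols)
#     print(f"pending={len(pending)} running={len(running)} finished={len(finished)} failed={len(failed)}")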