File size: 2,752 Bytes
d317f64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a70555b
d317f64
 
 
 
 
 
 
 
 
 
a70555b
d317f64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69

import functools
import json
import logging
import os
import subprocess
import time

import pandas as pd
from huggingface_hub import snapshot_download

from src.envs import EVAL_RESULTS_PATH

# Configure root logging at import time: INFO level, timestamped messages.
# NOTE(review): calling basicConfig at module import is a side effect that
# clobbers any configuration the host app set earlier — confirm this module
# is the app entry point rather than an imported library.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

def time_diff_wrapper(func):
    """Decorator that logs the wall-clock time taken by each call to *func*.

    Returns the wrapped callable; the wrapped function's return value is
    passed through unchanged.
    """
    # functools.wraps preserves __name__/__doc__/__module__ on the wrapper,
    # so introspection and further decoration of the wrapped function work.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        diff = time.time() - start_time
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logging.info("Time taken for %s: %s seconds", func.__name__, diff)
        return result

    return wrapper

@time_diff_wrapper
def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
    """Download a Hugging Face Hub snapshot with exponential-backoff retries.

    Args:
        repo_id: Hub repository id to download.
        local_dir: local directory the snapshot is written to.
        repo_type: Hub repo type (default "dataset").
        max_attempts: total number of tries before giving up.
        backoff_factor: base of the exponential wait between tries.

    Returns None. On total failure the error is logged but NOT raised
    (preserves the original best-effort contract — callers do not catch).
    """
    for attempt in range(max_attempts):
        try:
            logging.info("Downloading %s to %s (attempt %d/%d)",
                         repo_id, local_dir, attempt + 1, max_attempts)
            snapshot_download(
                repo_id=repo_id,
                local_dir=local_dir,
                repo_type=repo_type,
                tqdm_class=None,
                token=os.environ.get("HF_TOKEN_PRIVATE"),
                etag_timeout=30,
                max_workers=8,
            )
            logging.info("Download successful")
            return
        except Exception as exc:  # best-effort: any Hub/network error retries
            logging.error("Error downloading %s: %s", repo_id, exc)
            # Only sleep if another attempt follows; the original slept once
            # more after the final failure, delaying the caller for nothing.
            if attempt + 1 < max_attempts:
                wait_time = backoff_factor ** attempt
                logging.info("Retrying in %ss", wait_time)
                time.sleep(wait_time)
    logging.error("Failed to download %s after %d attempts", repo_id, max_attempts)

def build_leadearboard_df():
    """Build and return the leaderboard DataFrame, optionally refreshing data.

    When the LEADERBOARD_DOWNLOAD env var is "True" (the default), first
    downloads the openbench-eval dataset and syncs its answer/judgment files
    into the local arena-hard data tree. Always reads the leaderboard records
    from eval-results/evals/upd.json.

    Returns:
        pandas.DataFrame: a copy of the leaderboard records.
    """
    if os.getenv("LEADERBOARD_DOWNLOAD", "True") == "True":
        # Full-initialization path: refresh the evaluation results from the Hub.
        download_dataset("Vikhrmodels/openbench-eval", EVAL_RESULTS_PATH)

        # NOTE: subprocess.run with an argv list runs WITHOUT a shell, so a
        # '/*' suffix is never glob-expanded — rsync would receive a literal
        # '*' source matching nothing (silently, because check=False). A
        # trailing '/' makes rsync copy the directory's contents instead.
        # EVAL_RESULTS_PATH[2:] presumably strips a leading './' — TODO confirm.
        results_root = EVAL_RESULTS_PATH[2:]
        sync_pairs = [
            (f"{results_root}/external/",
             "src/gen/data/arena-hard-v0.1/model_answer/"),
            (f"{results_root}/model_judgment/",
             "src/gen/data/arena-hard-v0.1/model_judgement/"),
        ]
        for src, dst in sync_pairs:
            # check=False: sync is best-effort; a missing source must not abort startup.
            subprocess.run(
                ["rsync", "-avzP", "--ignore-existing", src, dst],
                check=False,
            )

    # Always retrieve the leaderboard DataFrame; 'with' closes the file
    # (the original json.load(open(...)) leaked the handle).
    with open("eval-results/evals/upd.json", "r", encoding="utf-8") as fp:
        leaderboard_df = pd.DataFrame.from_records(json.load(fp))
    return leaderboard_df.copy()