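"""Configuration for the Open LLM Leaderboard Space.

Settings are resolved from environment variables, then from a YAML task
config, then from hard-coded defaults; see get_config below.
"""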
import os
from yaml import safe_load
from huggingface_hub import HfApi
TASK_CONFIG_NAME = os.getenv("TASK_CONFIG", "pt_config")
TASK_CONFIG_PATH = os.path.join('tasks_config', TASK_CONFIG_NAME + ".yaml")
with open(TASK_CONFIG_PATH, 'r', encoding='utf-8') as f:
    TASK_CONFIG = safe_load(f)
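
# A setting is resolved with the following precedence: an environment variable
# wins, then the 'config' section of the task YAML, then the supplied default.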
def get_config(name, default):
    res = None
    if name in os.environ:
        res = os.environ[name]
    elif 'config' in TASK_CONFIG:
        res = TASK_CONFIG['config'].get(name, None)
    if res is None:
        return default
    return res
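
# Illustrative sketch (an assumption, not a file shipped with this Space) of
# the YAML shape get_config expects, e.g. tasks_config/pt_config.yaml:
#
#   config:
#     LEADERBOARD_NAME: "My Leaderboard"
#     IS_PUBLIC: "true"
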
def str2bool(v):
    return str(v).lower() in ("yes", "true", "t", "1")
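# e.g. str2bool("True") -> True, str2bool("0") -> False; values outside the
# allow-list above are treated as False.
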
# clone / pull the lmeh eval data
H4_TOKEN = get_config("H4_TOKEN", None)
LEADERBOARD_NAME = get_config("LEADERBOARD_NAME", "Open LLM Leaderboard")
REPO_ID = get_config("REPO_ID", "HuggingFaceH4/open_llm_leaderboard")
QUEUE_REPO = get_config("QUEUE_REPO", "open-llm-leaderboard/requests")
DYNAMIC_INFO_REPO = get_config("DYNAMIC_INFO_REPO", "open-llm-leaderboard/dynamic_model_information")
RESULTS_REPO = get_config("RESULTS_REPO", "open-llm-leaderboard/results")
RAW_RESULTS_REPO = get_config("RAW_RESULTS_REPO", None)
PRIVATE_QUEUE_REPO = QUEUE_REPO
PRIVATE_RESULTS_REPO = RESULTS_REPO
#PRIVATE_QUEUE_REPO = "open-llm-leaderboard/private-requests"
#PRIVATE_RESULTS_REPO = "open-llm-leaderboard/private-results"
IS_PUBLIC = str2bool(get_config("IS_PUBLIC", True))
CACHE_PATH = get_config("HF_HOME", ".")
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
DYNAMIC_INFO_PATH = os.path.join(CACHE_PATH, "dynamic-info")
DYNAMIC_INFO_FILE_PATH = os.path.join(DYNAMIC_INFO_PATH, "model_infos.json")
EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private"
EVAL_RESULTS_PATH_PRIVATE = "eval-results-private"
PATH_TO_COLLECTION = get_config("PATH_TO_COLLECTION", "open-llm-leaderboard/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03")
# Rate limit variables: each user can presumably submit up to RATE_LIMIT_QUOTA
# models every RATE_LIMIT_PERIOD days; accounts listed in HAS_HIGHER_RATE_LIMIT
# are granted a larger quota.
RATE_LIMIT_PERIOD = int(get_config("RATE_LIMIT_PERIOD", 7))
RATE_LIMIT_QUOTA = int(get_config("RATE_LIMIT_QUOTA", 5))
HAS_HIGHER_RATE_LIMIT = get_config("HAS_HIGHER_RATE_LIMIT", "TheBloke").split(',')
TRUST_REMOTE_CODE = str2bool(get_config("TRUST_REMOTE_CODE", False))
# Set this to get an extra field with the average eval results from the
# original HF leaderboard
GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS = str2bool(get_config("GET_ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS", False))
ORIGINAL_HF_LEADERBOARD_RESULTS_REPO = get_config("ORIGINAL_HF_LEADERBOARD_RESULTS_REPO", "open-llm-leaderboard/results")
ORIGINAL_HF_LEADERBOARD_EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, 'original_results')
SHOW_INCOMPLETE_EVALS = str2bool(get_config("SHOW_INCOMPLETE_EVALS", False))
REQUIRE_MODEL_CARD = str2bool(get_config("REQUIRE_MODEL_CARD", True))
REQUIRE_MODEL_LICENSE = str2bool(get_config("REQUIRE_MODEL_LICENSE", True))
API = HfApi(token=H4_TOKEN)
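
# Minimal sketch (an assumption, not part of the original file): the "clone /
# pull the lmeh eval data" comment above suggests these constants feed local
# mirrors of the Hub dataset repos, roughly as below. snapshot_download is a
# real huggingface_hub helper; the function name pull_eval_data is hypothetical.
from huggingface_hub import snapshot_download

def pull_eval_data():
    snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH,
                      repo_type="dataset", token=H4_TOKEN)
    snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH,
                      repo_type="dataset", token=H4_TOKEN)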