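# Backend entry point for the Space: it syncs the eval queue and results
# datasets from the Hub, picks the highest-priority PENDING request, runs it
# through the evaluation suite task by task, and uploads the results.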
import os
import json
import logging
import pprint
from datetime import datetime

from huggingface_hub import snapshot_download

from src.backend.run_eval_suite import run_evaluation
from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request
from src.backend.sort_queue import sort_models_by_priority
from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND, DEVICE, LIMIT
from src.envs import QUEUE_REPO, RESULTS_REPO, API

# TASKS_HARNESS = [task.value.benchmark for task in Tasks]

logging.getLogger("openai").setLevel(logging.WARNING)
logging.basicConfig(level=logging.ERROR)

pp = pprint.PrettyPrinter(width=80)

PENDING_STATUS = "PENDING"
RUNNING_STATUS = "RUNNING"
FINISHED_STATUS = "FINISHED"
FAILED_STATUS = "FAILED"
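
# Keep local snapshots of the results and requests datasets in sync with the Hub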
snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60)
snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60)


def run_auto_eval():
    current_pending_status = [PENDING_STATUS]

    # pull the eval dataset from the hub and parse any eval requests
    # check completed evals and set them to finished
    check_completed_evals(api=API, checked_status=RUNNING_STATUS, completed_status=FINISHED_STATUS,
                          failed_status=FAILED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND,
                          hf_repo_results=RESULTS_REPO, local_dir_results=EVAL_RESULTS_PATH_BACKEND)

    # Get all eval requests that are PENDING; if you want to run other evals, change this parameter
    eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND)
    # Sort the evals by priority (first submitted, first run)
    eval_requests = sort_models_by_priority(api=API, models=eval_requests)

    print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")

    if len(eval_requests) == 0:
        return

    # Take the highest-priority request and mark it RUNNING in the queue repo
    eval_request = eval_requests[0]
    pp.pprint(eval_request)

    set_eval_request(api=API, eval_request=eval_request, set_to_status=RUNNING_STATUS, hf_repo=QUEUE_REPO,
                     local_dir=EVAL_REQUESTS_PATH_BACKEND)

    # results = run_evaluation(eval_request=eval_request, task_names=TASKS_HARNESS, num_fewshot=NUM_FEWSHOT,
    #                          batch_size=1, device=DEVICE, no_cache=True, limit=LIMIT)

    # Run each task separately and upload one results file per task
    TASKS_HARNESS = [task.value for task in Tasks]
    for task in TASKS_HARNESS:
        results = run_evaluation(eval_request=eval_request, task_names=[task.benchmark], num_fewshot=task.num_fewshot,
                                 batch_size=1, device=DEVICE, no_cache=True, limit=LIMIT)

        dumped = json.dumps(results, indent=2)
        print(dumped)

        output_path = os.path.join(EVAL_RESULTS_PATH_BACKEND, *eval_request.model.split("/"), f"results_{datetime.now()}.json")
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, "w") as f:
            f.write(dumped)

        API.upload_file(path_or_fileobj=output_path, path_in_repo=f"{eval_request.model}/results_{datetime.now()}.json",
                        repo_id=RESULTS_REPO, repo_type="dataset")

    # All tasks are done: mark the request FINISHED in the queue repo
    set_eval_request(api=API, eval_request=eval_request, set_to_status=FINISHED_STATUS, hf_repo=QUEUE_REPO,
                     local_dir=EVAL_REQUESTS_PATH_BACKEND)
    # breakpoint()


if __name__ == "__main__":
    run_auto_eval()