import os
from typing import Optional

import requests
from huggingface_hub import HfApi

# Info to change for your repository
# ----------------------------------
TOKEN = os.environ.get("HF_TOKEN")  # A read/write token for your org

OWNER = "llm-jp"  # Change to your org - don't forget to create a results and a requests dataset, with the correct format!
# ----------------------------------

REPO_ID = f"{OWNER}/open-japanese-llm-leaderboard"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

# If you set up a cache later, just change HF_HOME
CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")


# vllm version
def get_latest_vllm_version() -> Optional[str]:
    """Fetch the latest vllm release tag from GitHub."""
    url = "https://api.github.com/repos/vllm-project/vllm/releases/latest"
    try:
        # Time out rather than hang at module import if GitHub is unreachable
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        return None  # Return None if the API request fails
    if response.status_code == 200:
        latest_release = response.json()
        return latest_release["tag_name"]
    return None  # Return None if the API request fails


VLLM_CURRENT_VERSION = get_latest_vllm_version()

API = HfApi(token=TOKEN)
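

# --------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the original module):
# leaderboard apps in this style typically mirror the requests and results
# datasets into the local cache paths defined above before reading them.
# The helper name `sync_eval_datasets` is hypothetical; `snapshot_download`
# is the standard huggingface_hub API for pulling a repo to a local dir.
# --------------------------------------------------------------------------
from huggingface_hub import snapshot_download


def sync_eval_datasets() -> None:
    """Hypothetical helper: download local copies of the queue and results repos."""
    # Mirror the pending evaluation requests (a Hugging Face dataset repo)
    snapshot_download(
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        local_dir=EVAL_REQUESTS_PATH,
        token=TOKEN,
    )
    # Mirror the published evaluation results
    snapshot_download(
        repo_id=RESULTS_REPO,
        repo_type="dataset",
        local_dir=EVAL_RESULTS_PATH,
        token=TOKEN,
    )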