from pathlib import Path

banner_url = "https://huggingface.co/spaces/WildEval/WildBench-Leaderboard/resolve/main/%E2%80%8Eleaderboard_logo_v2.png"  # the same repo here.
# HTML snippet that renders the leaderboard banner image at the top of the page.
BANNER = f'<div style="display: flex; justify-content: space-around;"><img src="{banner_url}" alt="Banner" style="width: 40vw; min-width: 300px; max-width: 600px;"></div>'

INTRODUCTION_TEXT = """
# OSQ Benchmark (Evaluating LLMs with OSQs and MCQs)

🔗 [Website](https://github.com/VILA-Lab/Open-LLM-Leaderboard) | 💻 [GitHub](https://github.com/VILA-Lab/Open-LLM-Leaderboard) | 📖 [Paper](#) | 🐦 [X1](https://x.com/open_llm_lb) | 🐦 [X2](https://x.com/szq0214)

> ### Open-LLM-Leaderboard is a framework for evaluating large language models (LLMs) by transitioning from multiple-choice questions (MCQs) to open-style questions. This approach addresses the inherent biases and limitations of MCQs, such as selection bias and the effect of random guessing. By using open-style questions, the framework aims to provide a more accurate assessment of LLMs' abilities across various benchmarks and to ensure that the evaluation reflects true capabilities, particularly in language understanding and reasoning.
"""

CITATION_TEXT = """@article{,
  title={Open-LLM-Leaderboard: From Multi-choice to Open-style Questions for LLMs Evaluation, Benchmark, and Arena},
  author={Aidar Myrzakhan and Sondos Mahmoud Bsharat and Zhiqiang Shen},
  journal={arXiv preprint},
  year={2024},
}
"""
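
# A minimal usage sketch, assuming the Space front end is built with Gradio (typical
# for Hugging Face Spaces leaderboards). The layout and names below (e.g. `demo`)
# are illustrative assumptions, not part of the original module.
if __name__ == "__main__":
    import gradio as gr

    with gr.Blocks() as demo:
        gr.HTML(BANNER)                 # banner image, rendered as raw HTML
        gr.Markdown(INTRODUCTION_TEXT)  # introduction, rendered as Markdown
        with gr.Accordion("Citation", open=False):
            gr.Textbox(value=CITATION_TEXT, label="BibTeX", lines=7)

    demo.launch()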