aaditya committed on
Commit
4af3e29
1 Parent(s): 79e5241

Update src/display/about.py

Files changed (1)
src/display/about.py  +9 -1
src/display/about.py CHANGED
@@ -1,7 +1,15 @@
 from src.display.utils import ModelType
 
 
-TITLE = """<h1 align="center" id="space-title"> 🧬 Open Medical LLM Leaderboard 🩺</h1>"""
+TITLE = """
+
+<h1 align="center" style="color: #1a237e;"> Open Medical-LLM Leaderboard</h1>
+
+<div style="text-align: center;">
+<img src="https://raw.githubusercontent.com/monk1337/MultiMedQA/main/assets/logs.png" alt="Descriptive Alt Text" style="display: block; margin: auto; height: 160px;">
+</div>
+
+"""
 
 INTRODUCTION_TEXT = """
 🩺 The Open Medical LLM Leaderboard aims to track, rank and evaluate the performance of large language models (LLMs) on medical question answering tasks. It evaluates LLMs across a diverse array of medical datasets, including MedQA (USMLE), PubMedQA, MedMCQA, and subsets of MMLU related to medicine and biology. The leaderboard offers a comprehensive assessment of each model's medical knowledge and question answering capabilities.
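For context, a minimal sketch of how constants such as TITLE and INTRODUCTION_TEXT are typically rendered in a Gradio-based Space. The app.py wiring shown here is an assumption for illustration, not code from this commit or repository.

# Hypothetical usage sketch -- assumes the Space's app.py renders these
# constants with Gradio; not taken from this repository.
import gradio as gr

from src.display.about import TITLE, INTRODUCTION_TEXT

with gr.Blocks() as demo:
    gr.HTML(TITLE)                  # renders the new HTML heading and logo
    gr.Markdown(INTRODUCTION_TEXT)  # renders the leaderboard introduction

if __name__ == "__main__":
    demo.launch()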