File size: 4,321 Bytes
efeee6d
314f91a
95f85ed
acd8e8a
7e186c9
 
37b74a1
efeee6d
 
 
 
 
 
314f91a
efeee6d
046ddc7
 
 
 
 
 
1ffc326
37b74a1
b899767
 
efeee6d
 
2157fea
 
 
 
 
58733e4
efeee6d
7e186c9
 
 
0227006
0b77b16
 
acd8e8a
 
 
8657246
577338b
acd8e8a
 
 
 
 
f257792
acd8e8a
 
0b77b16
acd8e8a
 
0b77b16
acd8e8a
efeee6d
0227006
d313dbd
 
 
9833cdb
d16cee2
d313dbd
 
8c49cb6
d313dbd
 
 
 
 
 
 
 
 
8c49cb6
b323764
d313dbd
 
 
 
 
 
 
 
b323764
d313dbd
 
 
 
8c49cb6
 
d16cee2
58733e4
2a73469
 
fa821bd
9a93ff5
 
 
 
 
 
 
 
9833cdb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
from dataclasses import dataclass
from enum import Enum

import yaml
import os


@dataclass
class Task:
    """One leaderboard evaluation task: where its score comes from and how it is displayed."""

    benchmark: str  # benchmark name as it appears in the results
    metric: str  # metric key read from the results (e.g. "acc")
    col_name: str  # column header shown in the leaderboard table

class Tasks(Enum):
    """Closed set of evaluation tasks; each member wraps a Task descriptor."""

    # All tasks report accuracy ("acc") and reuse the benchmark name as the column title.
    basic_understanding = Task("Basic Understanding", "acc", "Basic Understanding")
    contextual_analysis = Task("Contextual Analysis", "acc", "Contextual Analysis")
    deeper_implications = Task("Deeper Implications", "acc", "Deeper Implications")
    broader_implications = Task("Broader Implications", "acc", "Broader Implications")
    further_insights = Task("Further Insights", "acc", "Further Insights")


# Number of few-shot examples used during evaluation.
NUM_FEWSHOT = 0  # Change with your few shot
# ---------------------------------------------------


# Your leaderboard name
# Raw HTML rendered as the page header (title + subtitle).
TITLE = """<body>
<!-- Existing Header Content -->
<h1 align="center" id="space-title">Multimodal LiveBench</h1>
<h3 align="center">Zero-Contamination Evaluation for Multimodal Models on Lively Updated Internet Content</h3>
</body>"""

# What does your leaderboard evaluate?
# The introduction text lives in about.md next to this module.
# encoding="utf-8" added for consistency with the links.yaml read below and so
# the markdown decodes identically on every platform (default encoding is
# locale-dependent otherwise).
with open(os.path.join(os.path.dirname(__file__), "about.md"), "r", encoding="utf-8") as f:
    INTRODUCTION_TEXT = f.read()


def get_link(item):
    """Render one link entry (from links.yaml) as an HTML icon + anchor.

    Args:
        item: mapping with a required "name" key and optional "icon" and
            "url" keys. "icon" may be an .svg path, a Font Awesome class
            ("fa-..."), a raw HTML snippet, or missing/empty.

    Returns:
        HTML string: the icon markup (possibly empty), a space, then the
        anchor tag opening in a new tab.
    """
    name = item["name"]
    icon = item.get("icon")
    url = item.get("url", "#")
    # Handle a missing/empty icon FIRST: the original code called
    # icon.endswith() before the falsy check, so icon=None raised
    # AttributeError for any entry without an "icon" key.
    if not icon:
        icon_tag = ""
    elif icon.endswith(".svg"):
        icon_tag = f'<img src="{icon}" alt="{name}" style="height: 18px; width: 18px; display: inline;">'
    elif icon.startswith("fa-"):
        icon_tag = f'<i class="{icon}"></i>'
    else:
        # Anything else is treated as pre-rendered HTML and passed through.
        icon_tag = icon
    return f'{icon_tag} <a href="{url}" target="_blank">{name}</a>'


# Build the header link bar from links.yaml (a YAML list of {name, icon?, url?}
# mappings consumed by get_link above). safe_load is used, so the YAML cannot
# instantiate arbitrary Python objects.
with open(os.path.join(os.path.dirname(__file__), "links.yaml"), "r", encoding="utf-8") as f:
    links = yaml.safe_load(f)
    LINKS = "<center>" + " | ".join([get_link(item) for item in links]) + "</center>"

# Which evaluations are you running? how can people reproduce what you have?
# Plain string literal: the original carried an f-prefix with no placeholders
# (ruff F541), dropped here. Also fixes the user-facing grammar
# ("here is the commands" -> "here are the commands").
LLM_BENCHMARKS_TEXT = """
## How it works

## Reproducibility
To reproduce our results, here are the commands you can run:

"""

# Guidance shown on the submission tab: a pre-submission checklist plus what a
# FAILED status means.
# NOTE(review): the text mentions `use_remote_code=True`; the transformers
# loading flag is spelled `trust_remote_code` — confirm the intended wording.
EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.

Note: make sure your model is public!
Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill up your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
"""

# Label and BibTeX snippet for the "cite these results" button.
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
# Raw string so BibTeX braces and any backslashes are kept verbatim.
CITATION_BUTTON_TEXT = r"""@misc{zhang2024lmmsevalrealitycheckevaluation,
      title={LMMs-Eval: Reality Check on the Evaluation of Large Multimodal Models}, 
      author={Kaichen Zhang and Bo Li and Peiyuan Zhang and Fanyi Pu and Joshua Adrian Cahyono and Kairui Hu and Shuai Liu and Yuanhan Zhang and Jingkang Yang and Chunyuan Li and Ziwei Liu},
      year={2024},
      eprint={2407.12772},
      archivePrefix={arXiv},
      primaryClass={cs.CL},
      url={https://arxiv.org/abs/2407.12772}, 
}
"""