Omartificial-Intelligence-Space committed
Commit • 5eeb4d8 • 1 Parent(s): eb8e45b
update about

Browse files: src/about.py (+14, -65)
src/about.py CHANGED

@@ -1,72 +1,21 @@
-from dataclasses import dataclass
 from enum import Enum
+from dataclasses import dataclass
 
 @dataclass
-class Task:
+class TaskInfo:
     benchmark: str
-    metric: str
     col_name: str
+    metric: str
 
-
-# Select your tasks here
-# ---------------------------------------------------
 class Tasks(Enum):
-    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("anli_r1", "acc", "ANLI")
-    task1 = Task("logiqa", "acc_norm", "LogiQA")
-
-NUM_FEWSHOT = 0 # Change with your few shot
-# ---------------------------------------------------
-
-
-
-# Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
-
-# What does your leaderboard evaluate?
-INTRODUCTION_TEXT = """
-Intro text
-"""
-
-# Which evaluations are you running? how can people reproduce what you have?
-LLM_BENCHMARKS_TEXT = f"""
-## How it works
-
-## Reproducibility
-To reproduce our results, here is the commands you can run:
-
-"""
-
-EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
-"""
-
-CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
-CITATION_BUTTON_TEXT = r"""
-"""
+    # Replace these with actual subjects from your dataset
+    History = TaskInfo(benchmark='History', col_name='History', metric='accuracy')
+    Mathematics = TaskInfo(benchmark='Mathematics', col_name='Mathematics', metric='accuracy')
+    Science = TaskInfo(benchmark='Science', col_name='Science', metric='accuracy')
+    Geography = TaskInfo(benchmark='Geography', col_name='Geography', metric='accuracy')
+    Literature = TaskInfo(benchmark='Literature', col_name='Literature', metric='accuracy')
+    Art = TaskInfo(benchmark='Art', col_name='Art', metric='accuracy')
+    Physics = TaskInfo(benchmark='Physics', col_name='Physics', metric='accuracy')
+    Chemistry = TaskInfo(benchmark='Chemistry', col_name='Chemistry', metric='accuracy')
+    Biology = TaskInfo(benchmark='Biology', col_name='Biology', metric='accuracy')
+    ComputerScience = TaskInfo(benchmark='Computer Science', col_name='Computer Science', metric='accuracy')
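
The new `Tasks` enum is pure data; the rest of the Space decides what it means. Below is a minimal sketch of how such an enum is typically consumed to build leaderboard columns and an aggregate score. The helper names are hypothetical and not part of this commit; it assumes the standard leaderboard template layout, with this file importable as `src.about` from the Space root.

```python
# Hypothetical sketch: how a leaderboard app typically consumes the
# Tasks enum defined in src/about.py. Names here are illustrative only.
from src.about import Tasks  # assumes execution from the Space root

# One leaderboard column per subject, taken from TaskInfo.col_name.
COLS = [task.value.col_name for task in Tasks]

def average_accuracy(results: dict) -> float:
    """Mean accuracy across all ten subjects; subjects missing from
    the results dict (keyed by TaskInfo.benchmark) count as 0.0."""
    scores = [results.get(task.value.benchmark, 0.0) for task in Tasks]
    return sum(scores) / len(scores)

print(COLS[:3])                                        # ['History', 'Mathematics', 'Science']
print(average_accuracy({"History": 0.8, "Art": 0.6}))  # 0.14 (1.4 / 10 subjects)
```

Because each enum member wraps a `TaskInfo` instance, `task.value.benchmark` is the key to look up in per-model result files, while `task.value.col_name` controls only what the UI displays, so the two can diverge (e.g. `'Computer Science'` with a space) without breaking lookups.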
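The safetensors step in the removed `EVALUATION_QUEUE_TEXT` checklist never came with a command. For reference, a minimal sketch of one common conversion route using transformers' built-in safe serialization; the repo id and output directory are placeholders, and this is one possible approach, not necessarily what this Space's evaluation harness expects.

```python
# Minimal sketch: convert an existing checkpoint to safetensors with
# transformers' built-in support. "your-username/your-model" is a placeholder.
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("your-username/your-model")
tokenizer = AutoTokenizer.from_pretrained("your-username/your-model")

# safe_serialization=True writes model.safetensors instead of pytorch_model.bin.
model.save_pretrained("./converted", safe_serialization=True)
tokenizer.save_pretrained("./converted")
```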