from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Change for your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")
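

# Note: each Tasks member wraps a Task, so its metadata is read through .value, e.g.
#   Tasks.task0.value.benchmark == "anli_r1"
#   Tasks.task1.value.col_name == "LogiQA"
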
NUM_FEWSHOT = 0  # Change to match your few-shot setting
TASKS_HARNESS = [task.value.benchmark for task in Tasks]
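# With the two example tasks above, TASKS_HARNESS evaluates to ["anli_r1", "logiqa"],
# i.e. the benchmark keys, presumably consumed by the evaluation-harness runner elsewhere in the Space.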
# ---------------------------------------------------
# TASKS_LIGHTEVAL = "lighteval|anli:r1|0|0,lighteval|logiqa|0|0"
tasks = ['heq-qa-tlnls', 'sentiment-acc', 'winograd-acc', 'he-en-trans-bleu', 'snli-acc', 'ilfacts-acc']
TASKS_LIGHTEVAL = ','.join(f'custom|{t}|0|0' for t in tasks)  # + ',leaderboard|arc:challenge|0|0'
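# With the task list above, TASKS_LIGHTEVAL expands to the single string
#   "custom|heq-qa-tlnls|0|0,custom|sentiment-acc|0|0,custom|winograd-acc|0|0,"
#   "custom|he-en-trans-bleu|0|0,custom|snli-acc|0|0,custom|ilfacts-acc|0|0"
# following the same "suite|task|0|0" pattern as the commented lighteval example above.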