Spaces:
Runtime error
Runtime error
pminervini
committed on
Commit
•
18d23cd
1
Parent(s):
d871986
update
Browse files
src/backend/manage_requests.py
CHANGED
@@ -5,6 +5,7 @@ from typing import Optional
|
|
5 |
|
6 |
from huggingface_hub import HfApi, snapshot_download
|
7 |
|
|
|
8 |
@dataclass
|
9 |
class EvalRequest:
|
10 |
model: str
|
@@ -22,7 +23,7 @@ class EvalRequest:
|
|
22 |
params: Optional[int] = None
|
23 |
license: Optional[str] = ""
|
24 |
|
25 |
-
def get_model_args(self):
|
26 |
model_args = f"pretrained={self.model},revision={self.revision},parallelize=True"
|
27 |
|
28 |
if self.precision in ["float16", "float32", "bfloat16"]:
|
|
|
5 |
|
6 |
from huggingface_hub import HfApi, snapshot_download
|
7 |
|
8 |
+
|
9 |
@dataclass
|
10 |
class EvalRequest:
|
11 |
model: str
|
|
|
23 |
params: Optional[int] = None
|
24 |
license: Optional[str] = ""
|
25 |
|
26 |
+
def get_model_args(self) -> str:
|
27 |
model_args = f"pretrained={self.model},revision={self.revision},parallelize=True"
|
28 |
|
29 |
if self.precision in ["float16", "float32", "bfloat16"]:
|
src/backend/run_eval_suite.py
CHANGED
@@ -21,6 +21,7 @@ def run_evaluation(eval_request: EvalRequest, task_names, num_fewshot, batch_siz
|
|
21 |
task_names = utils.pattern_match(task_names, tasks.ALL_TASKS)
|
22 |
|
23 |
print(f"Selected Tasks: {task_names}")
|
|
|
24 |
|
25 |
results = evaluator.simple_evaluate(model="hf-auto", # "hf-causal-experimental", # "hf-causal"
|
26 |
model_args=eval_request.get_model_args(),
|
|
|
21 |
task_names = utils.pattern_match(task_names, tasks.ALL_TASKS)
|
22 |
|
23 |
print(f"Selected Tasks: {task_names}")
|
24 |
+
print(f"Eval Request: {eval_request.get_model_args()}")
|
25 |
|
26 |
results = evaluator.simple_evaluate(model="hf-auto", # "hf-causal-experimental", # "hf-causal"
|
27 |
model_args=eval_request.get_model_args(),
|