fix local debug
backend-cli.py +1 -2
backend-cli.py CHANGED
@@ -94,7 +94,6 @@ def request_to_result_name(request: EvalRequest) -> str:
 
 def process_evaluation(task: Task, eval_request: EvalRequest) -> dict:
     batch_size = "auto"
-
     try:
         results = run_evaluation(eval_request=eval_request, task_names=[task.benchmark], num_fewshot=task.num_fewshot,
                                  batch_size=batch_size, device=DEVICE, use_cache=None, limit=LIMIT)
@@ -266,7 +265,7 @@ def process_pending_requests() -> bool:
 if __name__ == "__main__":
     wait = True
     hard_task_lst = None
-    local_debug =
+    local_debug = False
     #debug specific task by ping
     if local_debug:
         debug_model_names = ['TinyLlama/TinyLlama-1.1B-Chat-v0.6']
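Why the second hunk is a fix rather than a tweak: the removed line left local_debug with no right-hand side, which is a SyntaxError in Python, so the previous revision of backend-cli.py failed at parse time, before execution could even reach the if __name__ == "__main__": block. A minimal, self-contained check of that behavior (illustrative only, not code from the repo):

import ast

# An assignment with an empty right-hand side does not parse, which is why
# the broken revision crashed on startup; binding the flag to False both
# repairs the parse error and keeps the local debug path disabled by default.
try:
    ast.parse("local_debug =")
except SyntaxError as err:
    print(f"SyntaxError: {err.msg}")   # the error the old revision hit

ast.parse("local_debug = False")       # the committed form parses cleanly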