Retry 1 FAILED models
deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B_eval_request_False_bfloat16_Original.json CHANGED
@@ -8,7 +8,7 @@
     "architectures": "Qwen2ForCausalLM",
     "weight_type": "Original",
     "main_language": "English",
-    "status": "FAILED",
+    "status": "RERUN",
     "submitted_time": "2025-01-28T23:51:12Z",
     "model_type": "💬 : chat (RLHF, DPO, IFT, ...)",
     "source": "manual",
@@ -28,7 +28,5 @@
         "tweetsentbr": 0.3709422121860369
     },
     "result_metrics_average": 0.24260416373877167,
-    "result_metrics_npm": -0.1275114377712255,
-    "error_msg": "Model architectures ['Qwen2ForCausalLM'] failed to be inspected. Please check the logs for more details.",
-    "traceback": "Traceback (most recent call last):\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 231, in wait_download_and_run_request\n run_request(\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/evaluate_llms.py\", line 102, in run_request\n results = run_eval_on_model(\n ^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/run_eval.py\", line 63, in run_eval_on_model\n result = evaluate(\n ^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/llm_leaderboard_eval_bot/lm_eval_util.py\", line 145, in evaluate\n results = evaluator.simple_evaluate(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/utils.py\", line 419, in _wrapper\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/evaluator.py\", line 100, in simple_evaluate\n lm = lm_eval.api.registry.get_model(model).create_from_arg_string(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/api/model.py\", line 134, in create_from_arg_string\n return cls(**args, **args2)\n ^^^^^^^^^^^^^^^^^^^^\n File \"/workspace/repos/llm_leaderboard/lm-evaluation-harness-pt/lm_eval/models/vllm_causallms.py\", line 125, in __init__\n self.model = LLM(**self.model_args)\n ^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/utils.py\", line 1039, in inner\n return fn(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/entrypoints/llm.py\", line 239, in __init__\n self.llm_engine = self.engine_class.from_engine_args(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/engine/llm_engine.py\", line 479, in from_engine_args\n engine_config = engine_args.create_engine_config(usage_context)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/engine/arg_utils.py\", line 1047, in create_engine_config\n model_config = self.create_model_config()\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/engine/arg_utils.py\", line 972, in create_model_config\n return ModelConfig(\n ^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/config.py\", line 343, in __init__\n self.multimodal_config = self._init_multimodal_config(\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/config.py\", line 402, in _init_multimodal_config\n if ModelRegistry.is_multimodal_model(architectures):\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/model_executor/models/registry.py\", line 432, in is_multimodal_model\n model_cls, _ = self.inspect_model_cls(architectures)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/model_executor/models/registry.py\", line 392, in inspect_model_cls\n return self._raise_for_unsupported(architectures)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/root/miniconda3/envs/torch21/lib/python3.11/site-packages/vllm/model_executor/models/registry.py\", line 349, in _raise_for_unsupported\n raise ValueError(\nValueError: Model architectures ['Qwen2ForCausalLM'] failed to be inspected. Please check the logs for more details.\n"
+    "result_metrics_npm": -0.1275114377712255
 }
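A note for triage before the rerun: the traceback bottoms out in vLLM's model registry, and the "failed to be inspected" wording typically indicates an architecture that is registered but whose model module crashed on import (often a torch/transformers/vLLM version mismatch), rather than an unknown architecture, which is what makes a retry reasonable. A minimal probe, reusing the module path and method names exactly as they appear in the traceback (run it inside the same torch21 environment to surface the underlying error), might look like:

    # Probe vLLM's registry the same way the failing call stack does.
    # Import path and method name are taken from the traceback above.
    from vllm.model_executor.models.registry import ModelRegistry

    try:
        model_cls, arch = ModelRegistry.inspect_model_cls(["Qwen2ForCausalLM"])
        print(f"resolved {arch} -> {model_cls.__name__}")
    except ValueError as err:
        # "failed to be inspected" points at a registered architecture whose
        # module import crashed; "not supported" would mean an unknown one.
        print(f"registry error: {err}")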
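The change itself is mechanical: flip the request's status back to RERUN and drop the two failure fields so the bot queues it again, while the partial result_metrics block survives untouched. A hypothetical helper performing the same edit (field names come from the JSON above; this is not the leaderboard bot's actual code):

    import json
    from pathlib import Path

    def requeue_failed_request(path: Path) -> None:
        # Flip a failed eval request back to RERUN and strip the error
        # fields, mirroring the diff above. Hypothetical helper.
        request = json.loads(path.read_text(encoding="utf-8"))
        request["status"] = "RERUN"
        request.pop("error_msg", None)
        request.pop("traceback", None)
        # ensure_ascii=False keeps the literal emoji in "model_type".
        path.write_text(json.dumps(request, ensure_ascii=False, indent=4) + "\n",
                        encoding="utf-8")

    requeue_failed_request(Path(
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B_eval_request_False_bfloat16_Original.json"
    ))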