Commit b246f21 · committed by t0-0 · 1 Parent(s): bbe855d

Changed the timing of model architecture retrieval to when submitting.
Files changed:
- src/leaderboard/read_evals.py +1 -11
- src/submission/submit.py +9 -1
src/leaderboard/read_evals.py CHANGED
@@ -8,7 +8,6 @@ import dateutil
 
 from src.display.formatting import make_clickable_model
 from src.display.utils import AutoEvalColumn, Backend, ModelType, Tasks, Version, WeightType
-from src.submission.check_validity import is_model_on_hub
 
 
 @dataclass
@@ -90,15 +89,6 @@ class EvalResult:
         result_key = f"{model}_{precision}_({num_few_shots}shots)_{add_special_tokens}"
         full_model = "/".join(org_and_model)
 
-        still_on_hub, _, model_config = is_model_on_hub(
-            full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
-        )
-        architecture = "?"
-        if model_config is not None:
-            architectures = getattr(model_config, "architectures", None)
-            if architectures:
-                architecture = ";".join(architectures)
-
         if "scores" not in data:
             raise KeyError(f"'scores' key not found in JSON file: {json_filepath}")
 
@@ -117,7 +107,6 @@ class EvalResult:
             results=results,
             precision=precision,
             revision=revision,
-            architecture=architecture,
             num_few_shots=num_few_shots,
             add_special_tokens=add_special_tokens,
             llm_jp_eval_version=version,
@@ -136,6 +125,7 @@ class EvalResult:
             self.likes = request.get("likes", 0)
             self.num_params = request.get("params", 0)
             self.date = request.get("submitted_time", "")
+            self.architecture = request.get("architecture", "?")
         except Exception:
             print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision}")
 
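For context, the extraction logic this commit relocates (deleted here, re-added in src/submission/submit.py below) amounts to reading the `architectures` attribute of a transformers config object and joining its entries with ";". A minimal standalone sketch of that pattern, using `AutoConfig.from_pretrained` as a stand-in for the config object returned by the repo's `is_model_on_hub` helper (the function name and model id below are illustrative, not part of this commit):

from transformers import AutoConfig


def get_architecture(model_id: str, revision: str = "main") -> str:
    """Return a model's architecture names joined by ';', or '?' if unavailable."""
    try:
        # Stand-in for the config object that is_model_on_hub returns.
        model_config = AutoConfig.from_pretrained(model_id, revision=revision)
    except Exception:
        return "?"
    architectures = getattr(model_config, "architectures", None)
    return ";".join(architectures) if architectures else "?"


# Example with a public model id:
# get_architecture("gpt2")  ->  "GPT2LMHeadModel"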
src/submission/submit.py CHANGED
@@ -47,7 +47,14 @@ def add_new_eval(
         revision = "main"
 
     # Is the model on the hub?
-    model_on_hub, error,
+    model_on_hub, error, model_config = is_model_on_hub(
+        model_name=model, revision=revision, token=TOKEN, test_tokenizer=True
+    )
+    architecture = "?"
+    if model_config is not None:
+        architectures = getattr(model_config, "architectures", None)
+        if architectures:
+            architecture = ";".join(architectures)
     if not model_on_hub:
         return styled_error(f'Model "{model}" {error}')
 
@@ -85,6 +92,7 @@ def add_new_eval(
         "private": False,
         "add_special_tokens": add_special_tokens,
         "llm_jp_eval_version": current_version,
+        "architecture": architecture,
     }
 
     # Check for duplicate submission
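Taken together, the two changes form a simple round trip: submit.py now resolves the architecture once, at submission time, and writes it into the request JSON, while read_evals.py reads it back with a "?" default so request files created before this change still load. A minimal sketch of that contract (field values below are placeholders, not real submissions):

import json

# What submit.py now stores in the request file (placeholder values).
eval_entry = {
    "private": False,
    "add_special_tokens": True,
    "architecture": "GPT2LMHeadModel",
}
stored = json.dumps(eval_entry, indent=2)

# What read_evals.py now does when it loads that request file.
request = json.loads(stored)
print(request.get("architecture", "?"))                           # -> GPT2LMHeadModel

# A request file written before this commit simply falls back to "?".
print(json.loads('{"private": false}').get("architecture", "?"))  # -> ?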