Fix search
Files changed:
- app.py +4 -2
- src/display/utils.py +2 -0
- src/leaderboard/read_evals.py +1 -0
app.py

@@ -124,7 +124,7 @@ def filter_models(
 
 
 def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
-    return df[
+    return df[df[AutoEvalColumn.dummy.name].str.contains(query, case=False)]
 
 
 def filter_queries(query: str, filtered_df: pd.DataFrame):
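The one-line fix points the search at the new dummy column instead of the rendered model cell. A minimal sketch of the resulting behaviour, using a toy frame with the "model_name_for_query" header this commit introduces (the helper below inlines the column name rather than going through AutoEvalColumn):

```python
import pandas as pd

# Toy frame standing in for the leaderboard DataFrame; "model_name_for_query"
# is the header this commit gives the dummy column.
df = pd.DataFrame({"model_name_for_query": ["org/model-7b", "other/base-13b"]})

def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
    # Case-insensitive substring match, mirroring the fixed line above.
    return df[df["model_name_for_query"].str.contains(query, case=False)]

print(search_table(df, "ORG"))  # matches org/model-7b despite the casing
```

Note that str.contains interprets the query as a regular expression by default, so a query containing a character such as "(" would raise; passing regex=False would make it a literal search. The second app.py hunk rebuilds the column list in select_columns: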
@@ -155,7 +155,9 @@ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
 
     # exclude 'always_here_cols' from 'columns' to avoid duplicates
     columns = [c for c in columns if c not in always_here_cols]
-    new_columns =
+    new_columns = (
+        always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]
+    )
 
     # remove duplicates while preserving order
     seen = set()
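The loop that consumes seen falls outside the hunk, so only its first line is visible; presumably it is the standard order-preserving dedup, sketched here under that assumption:

```python
# Assumed continuation of the hunk: drop duplicate column names while
# keeping each name's first position.
def dedup_preserve_order(columns: list) -> list:
    seen = set()
    unique = []
    for col in columns:
        if col not in seen:
            seen.add(col)
            unique.append(col)
    return unique

# new_columns can repeat names when always_here_cols overlaps the user's
# selection, which is exactly what this pass cleans up:
print(dedup_preserve_order(["T", "Model", "Average", "Model"]))
# -> ['T', 'Model', 'Average']
```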
src/display/utils.py

@@ -19,6 +19,7 @@ class ColumnContent:
     displayed_by_default: bool
     hidden: bool = False
     never_hidden: bool = False
+    dummy: bool = False
 
 ## Leaderboard columns
 auto_eval_column_dict = []
@@ -41,6 +42,7 @@ auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Avai
 auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
 auto_eval_column_dict.append(["num_few_shots", ColumnContent, ColumnContent("Few-shot", "str", False)])
 auto_eval_column_dict.append(["add_special_tokens", ColumnContent, ColumnContent("Add Special Tokens", "bool", False)])
+auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
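The dummy=True flag rides through make_dataclass, which accepts [name, type, default] triples and exposes each default ColumnContent as a class attribute. A self-contained sketch, assuming ColumnContent is declared frozen=True (only its fields appear in the diff, and make_dataclass requires hashable defaults):

```python
from dataclasses import dataclass, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False
    dummy: bool = False

auto_eval_column_dict = [
    ["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)],
    ["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)],
]

# Each [attr, type, default] triple becomes a field whose default is the
# ColumnContent instance, so columns are addressed as AutoEvalColumn.<attr>.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

print(AutoEvalColumn.dummy.name)   # model_name_for_query
print(AutoEvalColumn.dummy.dummy)  # True
```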
src/leaderboard/read_evals.py

@@ -150,6 +150,7 @@ class EvalResult:
             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             AutoEvalColumn.architecture.name: self.architecture,
             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
+            AutoEvalColumn.dummy.name: self.full_model,
             AutoEvalColumn.revision.name: self.revision,
             # AutoEvalColumn.average.name: None,
             AutoEvalColumn.license.name: self.license,
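Taken together, the three files close the loop: each result row now carries the raw repo id under the dummy header, while the visible model cell stays an HTML link. A rough end-to-end sketch, with a simplified stand-in for make_clickable_model:

```python
import pandas as pd

def make_clickable_model(full_model: str) -> str:
    # Simplified stand-in: the real helper builds the leaderboard's link markup.
    return f'<a href="https://huggingface.co/{full_model}">{full_model}</a>'

full_model = "org/model-7b"
row = {
    "Model": make_clickable_model(full_model),  # what the table displays
    "model_name_for_query": full_model,         # what search_table matches on
}
df = pd.DataFrame([row])

# Searching the plain-text column finds the row; searching the rendered cell
# would also match markup such as "href", which is what this fix avoids.
print(df[df["model_name_for_query"].str.contains("MODEL-7B", case=False)])
```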