pufanyi committed
Commit
7b3ec63
•
1 Parent(s): 9b15efc

chore: Update page title to "Live Bench"

Files changed (1)
  1. app.py +86 -86
app.py CHANGED
@@ -101,92 +101,92 @@ with demo:
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #     with gr.Row():
+        #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        #     with gr.Row():
+        #         with gr.Column():
+        #             model_name_textbox = gr.Textbox(label="Model name")
+        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #             model_type = gr.Dropdown(
+        #                 choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #                 label="Model type",
+        #                 multiselect=False,
+        #                 value=None,
+        #                 interactive=True,
+        #             )
+
+        #         with gr.Column():
+        #             precision = gr.Dropdown(
+        #                 choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #                 label="Precision",
+        #                 multiselect=False,
+        #                 value="float16",
+        #                 interactive=True,
+        #             )
+        #             weight_type = gr.Dropdown(
+        #                 choices=[i.value.name for i in WeightType],
+        #                 label="Weights type",
+        #                 multiselect=False,
+        #                 value="Original",
+        #                 interactive=True,
+        #             )
+        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     submit_button = gr.Button("Submit Eval")
+        #     submission_result = gr.Markdown()
+        #     submit_button.click(
+        #         add_new_eval,
+        #         [
+        #             model_name_textbox,
+        #             base_model_name_textbox,
+        #             revision_name_textbox,
+        #             precision,
+        #             weight_type,
+        #             model_type,
+        #         ],
+        #         submission_result,
+        #     )
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
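
For context on the commit message: Gradio takes the browser tab title from the title argument of gr.Blocks, so a retitle like the one described would normally be a one-line change elsewhere in app.py. Below is a minimal sketch of that pattern, assuming a bare layout; it is illustrative only and not the repository's actual code.

import gradio as gr

# A minimal sketch (assumption, not this repo's code): gr.Blocks(title=...)
# sets the HTML page / browser tab title of the app.
demo = gr.Blocks(title="Live Bench")

with demo:
    gr.Markdown("# Live Bench")  # placeholder body; the real app builds tabs here

if __name__ == "__main__":
    demo.launch()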