Clémentine committed
Commit 43c04ea
1 Parent(s): 64d2c90

removed tabs

Files changed (2)
  1. app.py +64 -63
  2. src/static/about.py +9 -15
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 from apscheduler.schedulers.background import BackgroundScheduler
 from src.static.env import API, REPO_ID, HF_TOKEN
-from src.static.about import TITLE, INTRO, ABOUT, DOCUMENTATION
+from src.static.about import TITLE, INTRO, ABOUT, DOCUMENTATION, SUBMIT

 from src.leaderboards.get_from_hub import get_leaderboard_info
 from src.static.tag_info import *
@@ -44,68 +44,69 @@ with demo:
     gr.Markdown(TITLE)
     gr.Markdown(INTRO, elem_classes="markdown-text")

-    with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("Search"):
-            with gr.Row():
-                with gr.Column():
-                    modality_tags = gr.CheckboxGroup(
-                        choices=[tag.name for tag in Modality],
-                        value=[],
-                        label="Modality of choice"
-                    )
-                    submission_tags = gr.CheckboxGroup(
-                        choices=[tag.name for tag in SubmissionType],
-                        value=[],
-                        label="Submission type"
-                    )
-                    test_set_tags = gr.CheckboxGroup(
-                        choices=[tag.name for tag in TestSetStatus],
-                        value=[],
-                        label="Test set status"
-                    )
-                    judge_tags = gr.CheckboxGroup(
-                        choices=[tag.name for tag in Judge],
-                        value=[],
-                        label="Judge used for the evaluation"
-                    )
-                with gr.Column():
-                    show_all = gr.Checkbox(
-                        value=False,
-                        label="Show all leaderboards"
-                    )
-                    evaluation_tags = gr.CheckboxGroup(
-                        choices=[tag.name for tag in EvaluationCategory],
-                        value=[],
-                        label="Specific evaluation categories"
-                    )
-                    language_tags = gr.CheckboxGroup(
-                        choices=[tag.capitalize() for tag in sorted(list(INFO_TO_LEADERBOARDS["language"].keys()))],
-                        value=[],
-                        label="Specific languages"
-                    )
-            with gr.Row():
-                leaderboards = gr.Markdown(
-                    value="",
-                )
-
-            for selector in [modality_tags, submission_tags, test_set_tags, evaluation_tags, language_tags, judge_tags]:
-                selector.change(
-                    lambda _: False,
-                    outputs=show_all
-                )
-            for selector in [show_all, modality_tags, submission_tags, test_set_tags, evaluation_tags, language_tags, judge_tags]:
-                selector.change(
-                    update_leaderboards,
-                    [show_all, modality_tags, submission_tags, test_set_tags, evaluation_tags, language_tags, judge_tags],
-                    leaderboards,
-                    queue=True,
-                )
-
-        with gr.TabItem("About"):
-            gr.Markdown(ABOUT, elem_classes="markdown-text")
-
-        with gr.TabItem("Documentation"):
-            gr.Markdown(DOCUMENTATION, elem_classes="markdown-text")
+    with gr.Row():
+        with gr.Column():
+            modality_tags = gr.CheckboxGroup(
+                choices=[tag.name for tag in Modality],
+                value=[],
+                label="Modality of choice"
+            )
+            submission_tags = gr.CheckboxGroup(
+                choices=[tag.name for tag in SubmissionType],
+                value=[],
+                label="Submission type"
+            )
+            test_set_tags = gr.CheckboxGroup(
+                choices=[tag.name for tag in TestSetStatus],
+                value=[],
+                label="Test set status"
+            )
+            judge_tags = gr.CheckboxGroup(
+                choices=[tag.name for tag in Judge],
+                value=[],
+                label="Judge used for the evaluation"
+            )
+        with gr.Column():
+            show_all = gr.Checkbox(
+                value=False,
+                label="Show all leaderboards"
+            )
+            evaluation_tags = gr.CheckboxGroup(
+                choices=[tag.name for tag in EvaluationCategory],
+                value=[],
+                label="Specific evaluation categories"
+            )
+            language_tags = gr.CheckboxGroup(
+                choices=[tag.capitalize() for tag in sorted(list(INFO_TO_LEADERBOARDS["language"].keys()))],
+                value=[],
+                label="Specific languages"
+            )
+    with gr.Row():
+        leaderboards = gr.Markdown(
+            value="",
+        )
+
+    for selector in [modality_tags, submission_tags, test_set_tags, evaluation_tags, language_tags, judge_tags]:
+        selector.change(
+            lambda _: False,
+            outputs=show_all
+        )
+    for selector in [show_all, modality_tags, submission_tags, test_set_tags, evaluation_tags, language_tags, judge_tags]:
+        selector.change(
+            update_leaderboards,
+            [show_all, modality_tags, submission_tags, test_set_tags, evaluation_tags, language_tags, judge_tags],
+            leaderboards,
+            queue=True,
+        )
+
+    with gr.Accordion("How to submit your leaderboard?", open=False):
+        gr.Markdown(SUBMIT, elem_classes="markdown-text")
+
+    with gr.Accordion("Tags documentation", open=False):
+        gr.Markdown(ABOUT, elem_classes="markdown-text")
+
+    with gr.Accordion("How to build your own leaderboard?", open=False):
+        gr.Markdown(DOCUMENTATION, elem_classes="markdown-text")

 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=10800) # restarted every 3h
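In the new wiring above, each selector's `change` event first resets `show_all` to `False`, then calls `update_leaderboards` with the current selections and writes its return value into the `leaderboards` Markdown component. The callback itself is not part of this diff; the sketch below is only a hypothetical illustration of the expected interface (a boolean plus six lists of tag names in, a Markdown string out), with a toy tag index standing in for the data built by `get_leaderboard_info` — it is not the Space's actual implementation.

```
# Hypothetical sketch, not the callback shipped in this Space.
# Toy tag index standing in for the data returned by get_leaderboard_info().
TAGS_TO_LEADERBOARDS = {
    "Text": ["org/text-leaderboard"],
    "English": ["org/text-leaderboard", "org/multilingual-arena"],
}

def update_leaderboards(show_all, modality, submission, test_set,
                        evaluation, language, judge):
    """Return a Markdown bullet list of leaderboards matching the selected tags."""
    selected = [*modality, *submission, *test_set, *evaluation, *language, *judge]
    if show_all or not selected:
        # Nothing selected (or "show all" ticked): list every known leaderboard.
        names = {name for group in TAGS_TO_LEADERBOARDS.values() for name in group}
    else:
        names = {name for tag in selected for name in TAGS_TO_LEADERBOARDS.get(tag, [])}
    return "\n".join(f"- {name}" for name in sorted(names))
```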
src/static/about.py CHANGED
@@ -6,28 +6,24 @@ INTRO = """
 Have you ever wondered which leaderboard would be best for your use case?
 """

-ABOUT = ("""
-If you want your leaderboard to appear in our suggestions, feel free to add relevant information in its tag metadata, and it will be displayed here.
-
-# First step
-
+SUBMIT = """
 Make sure to either use the tag `leaderboard` or `arena` to your space, by adding the following to your README

 ```
 tags:
 - leaderboard
 ```
+"""

-# Extra tags
-
-## Submission type
+ABOUT = ("""
+### Submission type
 Arenas are not concerned by this category.


 """ +
 "\n".join([f"- `{s.value.key}`: {s.value.usage}" for s in SubmissionType]) +
 """
-## Test set status
+### Test set status
 Arenas are not concerned by this category.


@@ -35,34 +31,32 @@ Arenas are not concerned by this category.
 "\n".join([f"- `{s.value.key}`: {s.value.usage}" for s in TestSetStatus]) +
 """

-## Judges
+### Judges

 """ +
 "\n".join([f"- `{s.value.key}`: {s.value.usage}" for s in Judge]) +
 """

-## Modalities
+### Modalities
 Can be any (or several) of the following list:

 """ +
 "\n".join([f"- `{s.value.key}`: {s.value.usage}" for s in Modality]) +
 """

-## Evaluation categories
+### Evaluation categories
 Can be any (or several) of the following list:

 """ +
 "\n".join([f"- `{s.value.key}`: {s.value.usage}" for s in EvaluationCategory]) +
 """

-## Language
+### Language
 You can indicate the languages covered by your benchmark like so: `language:mylanguage`.
 At the moment, we do not support language codes, please use the language name in English.
 """)

 DOCUMENTATION = """
-How to create your own leaderboard?
-
 I'll make an updated documentation page here at some point, but for now, you can check our [demo leaderboard org](https://huggingface.co/demo-leaderboard-backend)!

 You just need to duplicate the front space (and backend if you want to run your leaderboard on spaces compute), copy the datasets to your own org, and edit the env variables.
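Taken together with the `SUBMIT` and `ABOUT` texts above, the README front matter of a leaderboard Space that this app can pick up might look like the following sketch (illustrative only: `My Leaderboard` and `english` are placeholders, `title` and `sdk` are the usual Space metadata fields, and the extra tag categories use keys from `src/static/tag_info.py` that are not shown in this commit, so only the documented `leaderboard` and `language:...` tags appear here):

```
---
title: My Leaderboard
sdk: gradio
tags:
- leaderboard
- language:english
---
```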