fdisk committed on
Commit
19112df
·
1 Parent(s): 61f4e5e

1. Implement model name search 2. Add repo restart scheduler

Files changed (1)
  app.py +84 -12
app.py CHANGED
@@ -1,27 +1,76 @@
 import gradio as gr
 import pandas as pd
 import requests
-
+from datetime import datetime
+from apscheduler.schedulers.background import BackgroundScheduler
 from src.about import (
     INTRODUCTION_TEXT,
     LLM_BENCHMARKS_TEXT,
     TITLE,
 )
 from src.display.css_html_js import custom_css
+from huggingface_hub import HfApi
+repo_id = "qinference/AIM100Leaderboard"
+api = HfApi()
+last_updated = datetime.now().strftime('Last updated at %Y-%m-%d %H:%m:%S')
+
+
+def restart_leaderboard():
+    api.pause_space(repo_id=repo_id)
+    api.restart_space(repo_id=repo_id)
 
 
 def get_evaluation():
-    response = requests.get("http://aim100.qinference.com/api/leaderboard/list")
-    data_json = response.json()
-    df = pd.DataFrame(data_json)
-    for col in df.columns:
-        df.loc[df[col] == 0, col] = '-'
-    df.insert(0, 'No', df.reset_index().index + 1)
-    ret = df.drop(columns='nodeSeq').rename(columns={'modelName': 'Model'})
-    ret.columns = [x.capitalize() for x in ret.columns]
+    global last_updated
+    try:
+        response = requests.get("http://aim100.qinference.com/api/leaderboard/list")
+        data_json = response.json()
+        df = pd.DataFrame(data_json)
+        for col in df.columns:
+            df.loc[df[col] == 0, col] = '-'
+        df.insert(0, 'No', df.reset_index().index + 1)
+        ret = df.drop(columns='nodeSeq').rename(columns={'modelName': 'Model'})
+        ret.columns = [x.capitalize() for x in ret.columns]
+    except ValueError:
+        ret = default_evaluation()
+
+    last_updated = "<p style='text-align: right; padding-right: 5px;'>" + datetime.now().strftime('Last updated at %Y-%m-%d %H:%m:%S') + "</p>"
     return ret
 
 
+def default_evaluation():
+    global last_updated
+
+    default_data = [{
+        "No": "-",
+        "Model": "-",
+        "Total": "-",
+        "Inference": "-",
+        "Grammar": "-",
+        "Understanding": "-",
+        "Coding": "-",
+        "Math": "-",
+        "Writing": "-",
+        "Etc": "-"
+    }]
+    df = pd.DataFrame(default_data)
+    last_updated = datetime.now().strftime('Last updated at %Y-%m-%d %H:%m:%S')
+    return df
+
+
+# Searching and filtering
+def update_table(
+    hidden_df: pd.DataFrame,
+    query: str,
+):
+    filtered_df = hidden_df[hidden_df["Model"].str.contains(query, case=False)]
+    return filtered_df
+
+
+original_df = get_evaluation()
+leaderboard_df = original_df.copy()
+
+
 leaderboard = gr.Blocks(css=custom_css)
 with leaderboard:
     gr.HTML(TITLE)
@@ -29,15 +78,38 @@ with leaderboard:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
+            with gr.Row():
+                search_bar = gr.Textbox(
+                    placeholder=" 🔍 Search for your model (separate multiple queries with `;`) and press ENTER...",
+                    show_label=False,
+                    elem_id="search-bar",
+                )
             leaderboard_table = gr.components.Dataframe(
-                value=get_evaluation(),
+                value=leaderboard_df,
                 elem_id="leaderboard-table",
                 interactive=False,
                 visible=True,
             )
-
+            # Dummy leaderboard for handling the case when the user uses backspace key
+            hidden_leaderboard_table_for_search = gr.components.Dataframe(
+                value=original_df,
+                visible=False,
+            )
+            search_bar.submit(
+                update_table,
+                [
+                    hidden_leaderboard_table_for_search,
+                    search_bar
+                ],
+                leaderboard_table,
+            )
+            scheduler = BackgroundScheduler()
+            scheduler.add_job(restart_leaderboard, "interval", seconds=60)
+            scheduler.start()
+            with gr.Row():
+                gr.HTML(last_updated)
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-
 leaderboard.queue(default_concurrency_limit=40).launch()
+
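For context, the two changes in this commit are: (1) a model-name search that applies a case-insensitive substring filter over the leaderboard's Model column through update_table() and search_bar.submit(), and (2) a BackgroundScheduler job that pauses and restarts the Space through huggingface_hub.HfApi every 60 seconds. The snippet below is a minimal standalone sketch of the filtering behaviour only; the sample rows and the filter_by_model helper are illustrative stand-ins, not part of the commit.

import pandas as pd

# Hypothetical sample rows standing in for the leaderboard API response.
sample_df = pd.DataFrame([
    {"No": 1, "Model": "alpha-7b", "Total": 81.2},
    {"No": 2, "Model": "beta-70b", "Total": 79.5},
])


def filter_by_model(df: pd.DataFrame, query: str) -> pd.DataFrame:
    # Same idea as update_table() in the diff: keep rows whose Model value
    # contains the query, ignoring case.
    return df[df["Model"].str.contains(query, case=False)]


print(filter_by_model(sample_df, "beta"))  # prints only the beta-70b row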