BenchmarkBot committed on
Commit d8b9ce2 • 1 Parent(s): 07c6067

filtered out some columns

Files changed (2)
  1. app.py +21 -25
  2. src/assets/text_content.py +1 -4
app.py CHANGED
@@ -1,28 +1,25 @@
 import os
 import gradio as gr
 import pandas as pd
-from huggingface_hub import HfApi
-from huggingface_hub import Repository
+from huggingface_hub import HfApi, Repository
 from apscheduler.schedulers.background import BackgroundScheduler
 
-from src.assets.text_content import *
-from src.assets.css_html_js import custom_css
+from src.assets.text_content import TITLE, INTRODUCTION_TEXT
+from src.assets.css_html_js import custom_css, get_window_url_params
 
 OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
 
 LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
 LLM_PERF_DATASET_REPO = "optimum/llm-perf"
 
-api = HfApi()
-
 
 def restart_space():
-    api.restart_space(
+    HfApi().restart_space(
         repo_id=LLM_PERF_LEADERBOARD_REPO, token=OPTIMUM_TOKEN
     )
 
 
-def load_all_info_from_hub():
+def load_dataset_repo():
     llm_perf_repo = None
     if OPTIMUM_TOKEN:
         print("Loading LLM-Perf-Dataset from Hub...")
@@ -37,29 +34,26 @@ def load_all_info_from_hub():
     return llm_perf_repo
 
 
-llm_perf_repo = load_all_info_from_hub()
-
-
-def has_no_nan_values(df, columns):
-    return df[columns].notna().all(axis=1)
-
-
-def has_nan_values(df, columns):
-    return df[columns].isna().any(axis=1)
-
-
 def get_leaderboard_df():
     if llm_perf_repo:
         llm_perf_repo.git_pull()
 
     df = pd.read_csv("./llm-perf/reports/cuda_1_100/inference_report.csv")
-    print(df.columns)
+    df = df[["model", "backend.name", "backend.torch_dtype", "backend.quantization",
+             "generate.latency(s)", "generate.throughput(tokens/s)"]]
 
-    return df
+    df.rename(columns={
+        "model": "Model",
+        "backend.name": "Backend",
+        "backend.torch_dtype": "Torch dtype",
+        "backend.quantization": "Quantization",
+        "generate.latency(s)": "Latency (s)",
+        "generate.throughput(tokens/s)": "Throughput (tokens/s)"
+    }, inplace=True)
 
+    df.sort_values(by=["Throughput (tokens/s)"], ascending=False, inplace=True)
 
-original_df = get_leaderboard_df()
-leaderboard_df = original_df.copy()
+    return df
 
 
 def refresh():
@@ -68,14 +62,16 @@ def refresh():
     return leaderboard_df
 
 
+llm_perf_repo = load_dataset_repo()
+
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
-    print("rendering tab...")
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅 LLM-Perf Benchmark", elem_id="llm-perf-benchmark-tab-table", id=0):
+        with gr.TabItem("Vanilla Benchmark", elem_id="vanilla-benchmark", id=0):
+            leaderboard_df = get_leaderboard_df()
             leaderboard_table_lite = gr.components.Dataframe(
                 value=leaderboard_df,
                 headers=leaderboard_df.columns.tolist(),
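
For context on what the new `get_leaderboard_df` post-processing does, here is a minimal sketch using a made-up two-row DataFrame in place of `inference_report.csv`. The column names and the select/rename/sort steps are taken from the diff; the values and the extra `experiment_hash` column are invented for illustration only.

```python
import pandas as pd

# Hypothetical stand-in for ./llm-perf/reports/cuda_1_100/inference_report.csv
raw = pd.DataFrame({
    "model": ["gpt2", "facebook/opt-350m"],
    "backend.name": ["pytorch", "pytorch"],
    "backend.torch_dtype": ["float16", "float32"],
    "backend.quantization": [None, None],
    "generate.latency(s)": [0.42, 0.87],
    "generate.throughput(tokens/s)": [238.0, 115.0],
    "experiment_hash": ["abc123", "def456"],  # example of a column that gets filtered out
})

# Same three steps as the patched get_leaderboard_df(): select, rename, sort.
df = raw[["model", "backend.name", "backend.torch_dtype", "backend.quantization",
          "generate.latency(s)", "generate.throughput(tokens/s)"]]
df = df.rename(columns={
    "model": "Model",
    "backend.name": "Backend",
    "backend.torch_dtype": "Torch dtype",
    "backend.quantization": "Quantization",
    "generate.latency(s)": "Latency (s)",
    "generate.throughput(tokens/s)": "Throughput (tokens/s)",
})
df = df.sort_values(by=["Throughput (tokens/s)"], ascending=False)

# Only the six display columns remain, ordered by descending throughput.
print(df.to_string(index=False))
```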
src/assets/text_content.py CHANGED
@@ -1,8 +1,5 @@
 TITLE = """<h1 align="center" id="space-title">🤗 Open LLM-Perf Leaderboard</h1>"""
 
 INTRODUCTION_TEXT = f"""
-The 🤗 Open LLM-Perf Leaderboard aims to benchmark the performance of large language models (LLMs) on different backends and hardwares using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark)
-"""
-
-LLM_BENCHMARKS_TEXT = f"""
+The 🤗 Open LLM-Perf Leaderboard aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) on different backends and hardwares using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark)
 """