import os
import json
import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from src.assets.text_content import TITLE, INTRODUCTION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from src.utils import restart_space, load_dataset_repo, make_clickable_model
from src.assets.css_html_js import custom_css, get_window_url_params
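
# Hugging Face Hub repos used by this space: the leaderboard space itself and the dataset holding the benchmark reports;
# OPTIMUM_TOKEN is read from the environment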
LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
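
# mapping from raw report columns to the headers displayed in the leaderboard tables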
COLUMNS_MAPPING = {
    "model": "Model 🤗",
    "backend.name": "Backend 🏭",
    "backend.torch_dtype": "Datatype 📥",
    "average": "Average H4 Score ⬆️",
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
}
# one gradio datatype per displayed column
COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
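
# local clone of the dataset repo containing the benchmark reports (pulled again before each dataframe build)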
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
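

# build the dataframe shown in a leaderboard tab from a benchmark report merged with the average H4 scores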
def get_benchmark_df(benchmark):
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()
    # load
    bench_df = pd.read_csv(
        f"./llm-perf-dataset/reports/{benchmark}/inference_report.csv")
    scores_df = pd.read_csv(
        "./llm-perf-dataset/reports/average_scores.csv")
    # merge on model
    bench_df = bench_df.merge(
        scores_df, how="left", left_on="model", right_on="model")
    # preprocess
    bench_df["model"] = bench_df["model"].apply(make_clickable_model)
    # filter
    bench_df = bench_df[list(COLUMNS_MAPPING.keys())]
    # rename
    bench_df.rename(columns=COLUMNS_MAPPING, inplace=True)
    # sort
    bench_df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)
    return bench_df
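

# select which tab to open based on the url query params (e.g. ?tab=evaluation) forwarded by the js snippet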
def change_tab(query_param):
    query_param = query_param.replace("'", '"')
    query_param = json.loads(query_param)
    if (
        isinstance(query_param, dict)
        and "tab" in query_param
        and query_param["tab"] == "evaluation"
    ):
        return gr.Tabs.update(selected=1)
    else:
        return gr.Tabs.update(selected=0)
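

# filter both hidden dataframes according to the search text, selected backends/datatypes and the H4 score threshold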
def submit_query(single_df, multi_df, text, backends, datatypes, threshold):
    # the score comparison must be parenthesized: & binds tighter than >=
    filtered_single = single_df[
        single_df["Model 🤗"].str.contains(text) &
        single_df["Backend 🏭"].isin(backends) &
        single_df["Datatype 📥"].isin(datatypes) &
        (single_df["Average H4 Score ⬆️"] >= threshold)
    ]
    filtered_multi = multi_df[
        multi_df["Model 🤗"].str.contains(text) &
        multi_df["Backend 🏭"].isin(backends) &
        multi_df["Datatype 📥"].isin(datatypes) &
        (multi_df["Average H4 Score ⬆️"] >= threshold)
    ]
    return filtered_single, filtered_multi


# Define demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    with gr.Row():
        with gr.Box(elem_id="search-bar-box"):
            search_bar = gr.Textbox(
                label="Search 🔎",
                info="Search for a model and press Submit 🚀",
                elem_id="search-bar",
            )
        backend_checkboxes = gr.CheckboxGroup(
            choices=["pytorch", "onnxruntime"],
            value=["pytorch"],
            label="Backends 🏭",
            info="Select the backends",
            elem_id="backend-checkboxes",
        )
        datatype_checkboxes = gr.CheckboxGroup(
            choices=["float32", "float16"],
            value=["float32", "float16"],
            label="Datatypes 📥",
            info="Select the load datatypes",
            elem_id="datatype-checkboxes",
        )
    with gr.Row():
        with gr.Box(elem_id="threshold-slider-box"):
            threshold_slider = gr.Slider(
                label="H4 Threshold 📈",
                info="Filter by average H4 score",
                value=0.0,
                elem_id="threshold-slider",
            )
    with gr.Row():
        # submits the selected filters (gr.Button takes no info argument)
        submit_button = gr.Button(
            value="Submit 🚀",
            elem_id="submit-button",
        )
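
    # one tab per benchmark setup: single-GPU (1xA100) and multi-GPU (4xA100)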
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🖥️ A100-80GB Benchmark 🏋️", elem_id="A100-benchmark", id=0):
            SINGLE_A100_TEXT = """<h3>Single-GPU (1xA100):</h3>
            <ul>
                <li>Singleton Batch (1)</li>
                <li>Thousand Tokens (1000)</li>
            </ul>
            """
            gr.HTML(SINGLE_A100_TEXT)
            single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
            # Original leaderboard table
            single_A100_leaderboard = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="1xA100-table",
            )
            # Dummy leaderboard table for handling the case when the user uses the backspace key
            single_A100_for_search = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                max_rows=None,
                visible=False,
            )
        with gr.TabItem("🖥️ 4xA100-80GB Benchmark 🏋️", elem_id="4xA100-benchmark", id=1):
            MULTI_A100_TEXT = """<h3>Multi-GPU (4xA100):</h3>
            <ul>
                <li>Singleton Batch (1)</li>
                <li>Thousand Tokens (1000)</li>
                <li>Using <a href="https://huggingface.co/docs/accelerate" target="_blank">Accelerate</a>'s Auto Device Map</li>
            </ul>"""
            gr.HTML(MULTI_A100_TEXT)
            multi_A100_df = get_benchmark_df(benchmark="4xA100-80GB")
            multi_A100_leaderboard = gr.components.Dataframe(
                value=multi_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="4xA100-table",
            )
            # Dummy leaderboard table for handling the case when the user uses the backspace key
            multi_A100_for_search = gr.components.Dataframe(
                value=multi_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                max_rows=None,
                visible=False,
            )
    # Callbacks
    submit_button.click(
        submit_query,
        [single_A100_for_search, multi_A100_for_search, search_bar,
         backend_checkboxes, datatype_checkboxes, threshold_slider],
        [single_A100_leaderboard, multi_A100_leaderboard],
    )
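
    # citation accordion with a copyable citation entry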
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
            ).style(show_copy_button=True)
    dummy = gr.Textbox(visible=False)
    demo.load(
        change_tab,
        dummy,
        tabs,
        _js=get_window_url_params,
    )

# Restart space every hour
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600,
                  args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN])
scheduler.start()
# Launch demo
demo.queue(concurrency_count=40).launch()