import os

import gradio as gr
import pandas as pd
import plotly.express as px
from apscheduler.schedulers.background import BackgroundScheduler

from src.assets.css_html_js import custom_css
from src.assets.text_content import (
    TITLE,
    INTRODUCTION_TEXT,
    SINGLE_A100_TEXT,
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
)
from src.utils import (
    restart_space,
    load_dataset_repo,
    make_clickable_model,
    make_clickable_score,
    submit_query,
)

LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
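# maps raw report columns to the human-readable headers shown in the leaderboard
# (⬇️ lower is better, ⬆️ higher is better)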
COLUMNS_MAPPING = {
    "model": "Model 🤗",
    "backend.name": "Backend 🏭",
    "backend.torch_dtype": "Datatype 📥",
    "forward.peak_memory(MB)": "Peak Memory (MB) ⬇️",
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
    "h4_score": "H4 Score ⬆️",
}
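# one Gradio datatype per displayed column; "markdown" renders the clickable model and score links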
COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number", "markdown"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
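# clone (or reuse) a local copy of the dataset repo; benchmark reports are read from its CSV files below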
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)

def get_benchmark_df(benchmark):
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load the benchmark report and the additional scores, joined on model name
    bench_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
    scores_df = pd.read_csv("./llm-perf-dataset/reports/additional_data.csv")
    bench_df = bench_df.merge(scores_df, on="model", how="left")

    # preprocess: turn model names and scores into clickable markdown links
    bench_df["model"] = bench_df["model"].apply(make_clickable_model)
    bench_df["h4_score"] = bench_df["h4_score"].apply(make_clickable_score)

    # filter to the displayed columns
    bench_df = bench_df[list(COLUMNS_MAPPING.keys())]

    # rename to the human-readable headers
    bench_df.rename(columns=COLUMNS_MAPPING, inplace=True)

    # sort by throughput, fastest first
    bench_df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    return bench_df
# Dataframes
single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")

def get_benchmark_plot(benchmark):
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load the benchmark report and the additional scores, joined on model name
    bench_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
    scores_df = pd.read_csv("./llm-perf-dataset/reports/additional_data.csv")
    bench_df = bench_df.merge(scores_df, on="model", how="left")

    # drop extreme latency outliers to keep the plot readable
    bench_df = bench_df[bench_df["generate.latency(s)"] < 100]

    fig = px.scatter(
        bench_df,
        x="h4_score",
        y="generate.latency(s)",
        color="model_type",
        symbol="backend.name",
        size="forward.peak_memory(MB)",
        custom_data=[
            "model",
            "backend.name",
            "backend.torch_dtype",
            "forward.peak_memory(MB)",
            "generate.throughput(tokens/s)",
        ],
    )
    fig.update_layout(
        title={
            "text": "Model Score vs. Latency vs. Memory",
            "y": 0.95,
            "x": 0.5,
            "xanchor": "center",
            "yanchor": "top",
        },
        xaxis_title="Average H4 Score",
        yaxis_title="Latency per 1000 Tokens (s)",
        legend_title="Model Type",
    )
    fig.update_traces(
        # plotly hovertemplates are HTML; fields are joined with <br> line breaks
        hovertemplate="<br>".join([
            "Model: %{customdata[0]}",
            "Backend: %{customdata[1]}",
            "Datatype: %{customdata[2]}",
            "Peak Memory (MB): %{customdata[3]}",
            "Throughput (tokens/s): %{customdata[4]}",
            "Latency per 1000 Tokens (s): %{y}",
            "Average H4 Score: %{x}",
        ])
    )

    return fig
# Plots
single_A100_plot = get_benchmark_plot(benchmark="1xA100-80GB")
# Demo interface
demo = gr.Blocks(css=custom_css)
with demo:
    # leaderboard title
    gr.HTML(TITLE)

    # introduction text
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    # control panel title
    gr.HTML("<h2>Control Panel</h2>")