# src/latency_score_memory.py
import gradio as gr
import plotly.express as px

# Columns surfaced in the hover tooltip of the scatter plot
SCORE_MEMORY_LATENCY_DATA = [
    "Model 🤗",
    "Arch 🏛️",
    "Params (B)",
    "DType 📥",
    "Backend 🏭",
    "Open LLM Score (%)",
    "Prefill Latency (s)",
    "Decode Throughput (tokens/s)",
    "Allocated Memory (MB)",
    "E2E Latency (s)",
    "E2E Throughput (tokens/s)",
]

def get_lat_score_mem_fig(llm_perf_df):
    """Build the latency vs. score vs. memory scatter figure from the leaderboard dataframe."""
    copy_df = llm_perf_df.copy()
    # scatter plot: x = end-to-end latency, y = Open LLM score, bubble size = allocated memory
    fig = px.scatter(
        copy_df,
        x="E2E Latency (s)",
        y="Open LLM Score (%)",
        size="Allocated Memory (MB)",
        color="Arch 🏛️",
        custom_data=SCORE_MEMORY_LATENCY_DATA,
        color_discrete_sequence=px.colors.qualitative.Light24,
    )
    # expose every column of SCORE_MEMORY_LATENCY_DATA in the hover tooltip
    fig.update_traces(
        hovertemplate="<br>".join(
            [f"<b>{column}:</b> %{{customdata[{i}]}}" for i, column in enumerate(SCORE_MEMORY_LATENCY_DATA)]
        )
    )
    fig.update_layout(
        title={
            "text": "Latency vs. Score vs. Memory",
            "y": 0.95,
            "x": 0.5,
            "xanchor": "center",
            "yanchor": "top",
        },
        xaxis_title="Per 1000 Tokens Latency (s)",
        yaxis_title="Open LLM Score (%)",
        legend_title="LLM Architecture",
        width=1200,
        height=600,
    )
    return fig

def create_lat_score_mem_plot(llm_perf_df):
    """Create the Gradio plot component wrapping the latency/score/memory figure."""
    # descriptive text
    gr.HTML("👆 Hover over the points 👆 for additional information.", elem_id="text")
    # get figure
    fig = get_lat_score_mem_fig(llm_perf_df)
    # create plot
    plot = gr.components.Plot(
        value=fig,
        elem_id="plot",
        show_label=False,
    )
    return plot
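

# A minimal usage sketch, assuming a pandas DataFrame that already contains every
# column listed in SCORE_MEMORY_LATENCY_DATA. The model names and numbers below are
# illustrative placeholders, not real benchmark results, and this demo block is not
# how the leaderboard Space itself wires the component.
if __name__ == "__main__":
    import pandas as pd

    sample_df = pd.DataFrame(
        {
            "Model 🤗": ["org/model-a", "org/model-b"],
            "Arch 🏛️": ["llama", "mistral"],
            "Params (B)": [7.0, 7.2],
            "DType 📥": ["float16", "float16"],
            "Backend 🏭": ["pytorch", "pytorch"],
            "Open LLM Score (%)": [55.0, 60.0],
            "Prefill Latency (s)": [0.12, 0.10],
            "Decode Throughput (tokens/s)": [35.0, 40.0],
            "Allocated Memory (MB)": [14000, 15000],
            "E2E Latency (s)": [5.5, 4.8],
            "E2E Throughput (tokens/s)": [36.0, 41.0],
        }
    )

    # Components instantiated inside the Blocks context are added to the page.
    with gr.Blocks() as demo:
        create_lat_score_mem_plot(sample_df)

    demo.launch()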