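# app.py — Gradio Space: an LLM price / capability leaderboard built from LiteLLM's public model catalog.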
import gradio as gr
from gradio_leaderboard import Leaderboard, SelectColumns, ColumnFilter
from pathlib import Path
import pandas as pd
import requests
import json
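# Fetch LiteLLM's model catalog: per-model pricing, context windows, and capability flags.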
data = requests.get("https://raw.githubusercontent.com/BerriAI/litellm/refs/heads/main/model_prices_and_context_window.json").json()
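# 'sample_spec' is a schema placeholder in the catalog, not a real model, so drop it.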
if 'sample_spec' in data:
    del data['sample_spec']
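# One row per model: the JSON keys become the index, then an ordinary column.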
df = pd.DataFrame.from_dict(data, orient='index')
df = df.reset_index()
filters = []
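# Hide models whose deprecation date has already passed; rows without a date are kept.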
df['deprecation_date'] = pd.to_datetime(df['deprecation_date'], errors='coerce')
df = df[df['deprecation_date'].isna() | (df['deprecation_date'] > pd.Timestamp.now())]
for col in df.columns:
    # Expose every 'supports_*' capability flag as an optional boolean filter.
    if 'supports_' in col:
        filters.append(ColumnFilter(col, type="boolean", default=False))
    # Nested metadata dicts can't be rendered directly, so serialize them to JSON strings.
    if col == 'metadata':
        try:
            df[col] = df[col].apply(lambda x: json.dumps(x))
        except Exception:
            pass
    # Per-token prices are tiny fractions; scale to cost per million tokens and rename the column.
    if 'cost_per_token' in col:
        df[col] = df[col] * 1000000
        df = df.rename(columns={col: col.replace('cost_per_token', 'cost_per_M_tokens')})
df = df.rename(columns={'index': 'model_name'})
# print(df.head())
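# gradio_leaderboard renders the DataFrame as a searchable, filterable table with selectable columns.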
with gr.Blocks() as demo:
    gr.Markdown("""
    # 🥇 LLM Comparison (LiteLLM)
    """)
    Leaderboard(
        value=df,
        select_columns=SelectColumns(
            default_selection=['model_name', 'max_input_tokens', 'max_output_tokens',
                               'input_cost_per_M_tokens', 'output_cost_per_M_tokens',
                               'tpm', 'rpm', 'rpd'],
            # cant_deselect=["model_name"],
            label="Select Columns to Display:",
        ),
        search_columns=["model_name"],
        # hide_columns=["model_name_for_query", "Model Size"],
        filter_columns=["mode"] + filters,
        # datatype=config.TYPES,
        # column_widths=["33%"],
        height=1920,
    )
if __name__ == "__main__":
    demo.launch()
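# Run locally with `python app.py`; Gradio serves the app on http://127.0.0.1:7860 by default.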