import os
import pandas as pd
import requests
import huggingface_hub
import gradio as gr
data = pd.read_csv("data.csv", dtype="str")
webhook_url = os.environ.get("WEBHOOK_URL")
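# data.csv holds the leaderboard rows; every column is read as a string so the table
# renders exactly as stored. WEBHOOK_URL is taken from the environment (presumably a
# Space secret) and receives submitted model names; if it is unset, submissions below will fail.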
archlinks = {
    "Mamba": "https://arxiv.org/abs/2312.00752",
    "RWKV-4": "https://arxiv.org/abs/2305.13048",
    "Based": "https://arxiv.org/abs/2402.18668",
    "RWKV-5": "https://x.com/BlinkDL_AI/status/1685230712247795713",  # no paper yet; links to the architecture announcement
    "StripedHyena": "https://www.together.ai/blog/stripedhyena-7b",  # blog post; no standalone paper
}
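# Keys in archlinks must match the "Architecture" column of data.csv exactly;
# filter_table() below uses this mapping to turn architecture names into links.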
#def filter_table(cols, name, type, arch, license):
def filter_table(cols, name, type, arch):
    tmp = data.copy()  # work on a copy so the shared dataframe is never mutated
    # filter rows by the selected name / type / architecture
    tmp = tmp[tmp["Name"].str.contains(name)]
    tmp = tmp[tmp["Type"].isin(type)]
    tmp = tmp[tmp["Architecture"].isin(arch)]
    #tmp = tmp[tmp["License"].isin(license)]
    # prettify: shorten Type to its leading emoji and render names/architectures as links
    tmp["Type"] = tmp["Type"].apply(lambda x: x[0])
    tmp = tmp.rename({"Type": "T"}, axis=1)
    tmp["Name"] = tmp["Name"].apply(lambda x: f'<a target="_blank" href="https://huggingface.co/{x}" style="color:var(--link-text-color);text-decoration:underline;text-decoration-style:dotted">{x}</a>')
    tmp["Architecture"] = tmp["Architecture"].apply(lambda x: f'<a target="_blank" href="{archlinks[x]}" style="color:var(--link-text-color);text-decoration:underline;text-decoration-style:dotted">{x}</a>')
    #tmp["License"] = tmp["License"].apply(lambda x: f'<a target="_blank" href="https://choosealicense.com/licenses/{x}" style="color:var(--link-text-color);text-decoration:underline;text-decoration-style:dotted">{x}</a>')
    tmp["Base Model"] = tmp["Base Model"].apply(lambda x: f'<a target="_blank" href="https://huggingface.co/{x}" style="color:var(--link-text-color);text-decoration:underline;text-decoration-style:dotted">{x}</a>' if x != "base" else "")
    # hide the columns the user selected
    tmp = tmp.drop(cols, axis=1)
    # done!
    return tmp
def submit_model(name):
    try:
        huggingface_hub.hf_hub_download(repo_id=name, filename="config.json")  # sanity-check the input
    except huggingface_hub.utils.EntryNotFoundError:
        return "# ERROR: Model does not have a config.json file!"
    except huggingface_hub.utils.RepositoryNotFoundError:
        return "# ERROR: Model could not be found on the Hugging Face Hub!"
    except requests.exceptions.HTTPError:
        return "# ERROR: Network error while validating model. Please try again later."
    except Exception as e:
        print(e)
        return "# ERROR: Unexpected error. Please try again later."
    try:
        result = requests.post(webhook_url, json={"content": name})
        result.raise_for_status()  # requests.post() does not raise on HTTP error codes by itself
    except requests.exceptions.HTTPError:
        return "# ERROR: Network error while contacting queue. Please try again in a few minutes."
    except Exception as e:
        print(e)
        return "# ERROR: Unexpected error. Please try again later."
    return "# SUCCESS: Please wait up to 24 hours for your model to be added to the queue."
with gr.Blocks(css=".tab-buttons button{font-size:1.3em}") as demo:
    gr.HTML('<h1 style="text-align:center"><span style="font-size:1.3em">Subquadratic LLM Leaderboard</span></h1>')
    gr.Markdown("**REMEMBER:** If you don't see an eligible model here, make sure to submit it! We hope to incentivize subquadratic/attention-free LLM development through friendly competition.")
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.Tab("🏅 LLM Benchmark"):
            with gr.Row():
                with gr.Column():
                    namefilter = gr.Textbox(max_lines=1, placeholder="Search by model name and hit Enter...", show_label=False)
                    typefilter = gr.CheckboxGroup(show_label=False, choices=list(data["Type"].unique()), value=[n for n in data["Type"].unique() if n not in ["⌛ Pending"]])
                with gr.Column():
                    archfilter = gr.CheckboxGroup(label="Filter by model architecture", choices=list(data["Architecture"].unique()), value=list(data["Architecture"].unique()))
                    #lcnsfilter = gr.CheckboxGroup(label="Filter by model license", choices=list(data["License"].unique()), value=list(data["License"].unique()))
                with gr.Column():
                    colfilter = gr.CheckboxGroup(label="Hide columns", choices=list(data.columns)[2:])
            #table = gr.Dataframe(filter_table([],"",[n for n in data["Type"].unique() if n not in ["⌛ Pending"]],list(data["Architecture"].unique()),list(data["License"].unique())), datatype="markdown")
            table = gr.Dataframe(filter_table([],"",[n for n in data["Type"].unique() if n not in ["⌛ Pending"]],list(data["Architecture"].unique())), datatype="markdown")
            # actions: re-filter the table whenever any control changes
            #namefilter.submit(filter_table, [colfilter,namefilter,typefilter,archfilter,lcnsfilter], table)
            namefilter.submit(filter_table, [colfilter,namefilter,typefilter,archfilter], table)
            #for flt in [colfilter,typefilter,archfilter,lcnsfilter]:
            for flt in [colfilter,typefilter,archfilter]:
                #flt.input(filter_table, [colfilter,namefilter,typefilter,archfilter,lcnsfilter], table)
                flt.input(filter_table, [colfilter,namefilter,typefilter,archfilter], table)
        with gr.Tab("📝 About"):
            gr.Markdown("""
            The **Subquadratic LLM Leaderboard** evaluates LLMs with subquadratic/attention-free architectures (e.g. RWKV & Mamba), with the goal of providing open
            evaluation results while the architectures themselves are pending inclusion/release in the 🤗 Transformers library.

            The metrics are the same as the Open LLM Leaderboard: ARC 25-shot, HellaSwag 10-shot, MMLU 5-shot, TruthfulQA zero-shot, Winogrande 5-shot, and GSM8K 5-shot.

            This leaderboard is maintained by Devin Gulliver and is perpetually under construction; check back regularly for further improvements!

            Compute for evaluating RWKV models is generously provided by [Recursal AI](https://recursal.ai).
            """)
        with gr.Tab("🚀 Submit here!"):
            with gr.Group():
                with gr.Row():
                    model_name = gr.Textbox(max_lines=1, placeholder="Enter model name...", show_label=False, scale=4)
                    submit = gr.Button("Submit", variant="primary", scale=0)
            output = gr.Markdown("Enter a public HF repo id, then hit Submit to add it to the evaluation queue.")
            submit.click(fn=submit_model, inputs=model_name, outputs=output)
demo.launch(show_api=False, allowed_paths=["data.csv"])