|
from huggingface_hub import HfApi, HfFileSystem |
|
import re |
|
from tqdm import tqdm |
|
import concurrent.futures |
|
import gradio as gr |
|
import datetime |
|
import pandas as pd |
|
|
|
# Shared Hub clients, reused by every worker thread below:
# HfApi lists TheBloke's repos; HfFileSystem reads their README files.
api = HfApi()

fs = HfFileSystem()
|
|
|
|
|
# Markdown description shown on the leaderboard page.  Evaluated once at
# import time, so the "Last Update" stamp reflects when the Space started.
text = f"""
π― The Leaderboard aims to track TheBloke's quantised models.

## π Type Of Models

- GPTQ

- GGUF

- AWQ

- GGML

## π οΈ Backend

The leaderboard's backend mainly runs on the [Hugging Face Hub API](https://huggingface.co/docs/huggingface_hub/v0.5.1/en/package_reference/hf_api).

## π Searching

You can search for an author or a specific model using the search bar.

## β Last Update

This space was last updated on **{datetime.datetime.now().strftime("%Y-%m-%d %H:%M")}**.

## π Important Note

This space potentially includes incorrectly quantised models for a model.

If you find any incorrectly quantised model, please report it to me.
"""
|
|
|
# Quantisation markers TheBloke puts in repo names; a repo counts as a
# quantised model when any of them appears in its id.
QUANT_TAGS = ("GPTQ", "GGUF", "AWQ", "GGML")

# All of TheBloke's quantised repo ids, read via the public ModelInfo.id
# attribute instead of reaching into __dict__.
quant_models = [
    model.id
    for model in api.list_models(author="TheBloke")
    if any(tag in model.id for tag in QUANT_TAGS)
]

# Captures "(https://huggingface.co/<author>/<model>)" links — TheBloke's
# model cards use such a link to point back at the original (base) model.
pattern = r'\(https://huggingface\.co/([^/]+)/([^/]+)\)'

# base-model id -> list of TheBloke's quantised repo ids (filled concurrently).
liste = {}
|
|
|
def process_model(i, pattern, liste):
    """Resolve the original (base) model for one quantised repo id *i*.

    Reads the repo's README, extracts the first
    "(https://huggingface.co/<author>/<model>)" link — TheBloke's cards link
    back to the source model — and appends *i* under that base-model id in
    *liste*.

    Runs inside a thread pool: dict.setdefault and list.append are atomic
    under CPython's GIL, so no extra locking is needed here.
    """
    try:
        readme = fs.read_text(i + "/README.md")
    except FileNotFoundError:
        # Best effort: a repo without a readable README is simply skipped.
        return

    match = re.search(pattern, readme)
    if not match:
        return

    # Group 2 may still carry a trailing ")" from nested markdown, since
    # the greedy [^/]+ can swallow parentheses; drop anything after ")".
    full_id = f"{match.group(1)}/{match.group(2)}".split(")")[0]
    liste.setdefault(full_id, []).append(i)
|
|
|
|
|
# Fan the README lookups out over a thread pool: the work is pure network
# I/O, so the threads overlap nicely despite the GIL.
num_threads = 64

with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
    pending = [
        executor.submit(process_model, model_id, pattern, liste)
        for model_id in quant_models
    ]
    concurrent.futures.wait(pending)
|
|
|
|
|
# Flatten the {base model -> quantised repos} mapping into parallel column
# lists for the leaderboard table.
authors, models, gptq, gguf, awq, ggml = [], [], [], [], [], []

for base_model, repo_ids in liste.items():
    models.append(base_model)
    authors.append(base_model.split('/')[0])

    # One slot per quantisation kind; a later repo of the same kind
    # overwrites an earlier one, matching the original elif chain.
    slots = {"-GPTQ": None, "-GGUF": None, "-AWQ": None, "-GGML": None}
    for repo_id in repo_ids:
        for suffix in slots:
            if suffix in repo_id:
                slots[suffix] = repo_id
                break

    gptq.append(slots["-GPTQ"])
    gguf.append(slots["-GGUF"])
    awq.append(slots["-AWQ"])
    ggml.append(slots["-GGML"])


df = pd.DataFrame({'π€ Author Name': authors, 'π€ Model Name': models, 'π GPTQ': gptq, 'π₯ GGUF': gguf, 'π€·ββοΈ AWQ': awq, 'π GGML': ggml})
|
|
|
|
|
def search(search_text):
    """Return the rows of the global leaderboard matching *search_text*.

    An empty query returns the whole table.  A query containing "/" is
    treated as a full "author/model" id and matched against the model
    column; anything else is matched against the author column.  Cells are
    stored as rendered links, so the query is passed through clickable()
    before comparing.
    """
    if not search_text:
        return df

    target = clickable(search_text)
    column = 'π€ Model Name' if '/' in search_text else 'π€ Author Name'
    return df[df[column] == target]
|
|
|
|
|
def clickable(x):
    """Render *x* as an HTML link to its Hugging Face page.

    Falsy values (None, "") come back as None so empty leaderboard cells
    stay empty instead of becoming dead links.
    """
    if not x:
        return None
    url = f"https://huggingface.co/{x}"
    css = "color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;"
    return f'<a target="_blank" href="{url}" style="{css}">{x}</a>'
|
|
|
|
|
def to_clickable(df):
    """Convert every cell of *df* into an HTML link, in place.

    Mutates and returns the same DataFrame.  Note: calling this on the
    module-level `df` also converts it, which search() relies on when
    comparing cells against clickable(search_text).
    """
    for column in df.columns:
        # Pass clickable directly instead of wrapping it in a lambda.
        df[column] = df[column].apply(clickable)
    return df
|
|
|
|
|
# --- Gradio UI ----------------------------------------------------------
with gr.Blocks() as demo:
    # Logo + title header, then the explanatory markdown built above.
    gr.Markdown("""<center><img src = "https://cdn-uploads.huggingface.co/production/uploads/6426d3f3a7723d62b53c259b/tvPikpAzKTKGN5wrpadOJ.jpeg" width=200 height=200></center>""")
    gr.Markdown("""<h1 align="center" id="space-title">The Bloke Quantised Models</h1>""")
    gr.Markdown(text)

    with gr.Column(min_width=320):
        # Typos fixed: "a author or a spesific model".
        search_bar = gr.Textbox(placeholder="π Search for an author or a specific model", show_label=False)

    # Render every cell as markdown so the HTML anchors are clickable.
    df_clickable = to_clickable(df)
    gr_df = gr.Dataframe(df_clickable, interactive=False, datatype=["markdown"] * len(df_clickable.columns))

    # Pressing Enter in the search bar re-filters the table.
    search_bar.submit(fn=search, inputs=search_bar, outputs=gr_df)


demo.launch()