Update app.py
app.py CHANGED
@@ -1,38 +1,42 @@
 import streamlit as st
 import pandas as pd
-from huggingface_hub import HfApi
+from huggingface_hub import HfApi, ModelCard
 from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
 import re
 from io import StringIO
 from yall import create_yall
 import plotly.graph_objs as go
-from huggingface_hub import ModelCard
 
 def calculate_pages(df, items_per_page):
+    """Calculate the number of pages needed for pagination."""
     return -(-len(df) // items_per_page)  # Equivalent to math.ceil(len(df) / items_per_page)
 
 @st.cache_data
 def cached_model_info(_api, model):
+    """Fetch model information from the Hugging Face API and cache the result."""
     try:
         return _api.model_info(repo_id=str(model))
-    except (RepositoryNotFoundError, RevisionNotFoundError):
+    except (RepositoryNotFoundError, RevisionNotFoundError) as e:
+        st.error(f"Error fetching model info for {model}: {str(e)}")
         return None
 
 @st.cache_data
 def get_model_info(df):
+    """Get model information and update the DataFrame with likes and tags."""
     api = HfApi()
-
-
-
-
-
-
-
-
-
+    with st.spinner("Fetching model information..."):
+        for index, row in df.iterrows():
+            model_info = cached_model_info(api, row['Model'].strip())
+            if model_info:
+                df.loc[index, 'Likes'] = model_info.likes
+                df.loc[index, 'Tags'] = ', '.join(model_info.tags)
+            else:
+                df.loc[index, 'Likes'] = -1
+                df.loc[index, 'Tags'] = ''
     return df
 
 def convert_markdown_table_to_dataframe(md_content):
+    """Convert a markdown table to a pandas DataFrame."""
     cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)
     df = pd.read_csv(StringIO(cleaned_content), sep="\|", engine='python')
     df = df.drop(0, axis=0)
@@ -43,46 +47,57 @@ def convert_markdown_table_to_dataframe(md_content):
     return df
 
 def create_bar_chart(df, category):
+    """Create a horizontal bar chart for the specified category."""
     st.write(f"### {category} Scores")
     sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)
     fig = go.Figure(go.Bar(
         x=sorted_df[category],
         y=sorted_df['Model'],
         orientation='h',
-        marker=dict(color=sorted_df[category], colorscale='
+        marker=dict(color=sorted_df[category], colorscale='Viridis'),
+        hoverinfo='x+y'
     ))
     fig.update_layout(
-        margin=dict(l=20, r=20, t=20, b=20)
+        margin=dict(l=20, r=20, t=20, b=20),
+        title=f"Leaderboard for {category} Scores"
     )
     st.plotly_chart(fig, use_container_width=True, height=len(df) * 35)
 
 def fetch_merge_configs(df):
+    """Fetch and save merge configurations for the top models."""
     df_sorted = df.sort_values(by='Average', ascending=False)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        with open('/tmp/configurations.txt', 'a') as file:
+            for index, row in df_sorted.head(20).iterrows():
+                model_name = row['Model'].rstrip()
+                try:
+                    card = ModelCard.load(model_name)
+                    file.write(f'Model Name: {model_name}\n')
+                    file.write(f'Scores: {row["Average"]}\n')
+                    file.write(f'AGIEval: {row["AGIEval"]}\n')
+                    file.write(f'GPT4All: {row["GPT4All"]}\n')
+                    file.write(f'TruthfulQA: {row["TruthfulQA"]}\n')
+                    file.write(f'Bigbench: {row["Bigbench"]}\n')
+                    file.write(f'Model Card: {card}\n')
+                except Exception as e:
+                    st.error(f"Error loading model card for {model_name}: {str(e)}")
+        with open('/tmp/configurations.txt', 'r') as file:
+            content = file.read()
+        matches = re.findall(r'yaml(.*?)```', content, re.DOTALL)
+        with open('/tmp/configurations2.txt', 'w') as file:
+            for row, match in zip(df_sorted[['Model', 'Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']].head(20).values, matches):
+                file.write(f'Model Name: {row[0]}\n')
+                file.write(f'Scores: {row[1]}\n')
+                file.write(f'AGIEval: {row[2]}\n')
+                file.write(f'GPT4All: {row[3]}\n')
+                file.write(f'TruthfulQA: {row[4]}\n')
+                file.write(f'Bigbench: {row[5]}\n')
+                file.write('yaml' + match + '```\n')
+    except Exception as e:
+        st.error(f"Error while fetching merge configs: {str(e)}")
 
 def main():
+    """Main function to set up the Streamlit app and display the leaderboard."""
     st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")
     st.title("🏆 YALL - Yet Another LLM Leaderboard")
     st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite.")
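As a quick sanity check on the helpers touched by this commit, the sketch below parses a toy markdown table and paginates the result. It is illustrative only: the model names and scores are made up, and it assumes app.py's functions can be imported outside the running Streamlit Space.

# Illustrative sketch only: toy table and values are made up, and the import
# assumes app.py's helpers can be loaded outside the Streamlit app.
from app import calculate_pages, convert_markdown_table_to_dataframe

toy_md = (
    "| Model | Average |\n"
    "|---|---|\n"
    "| example/model-a | 52.3 |\n"
    "| example/model-b | 49.8 |\n"
)

df = convert_markdown_table_to_dataframe(toy_md)   # the |---|---| separator row (index 0) is dropped
print(len(df))                                     # 2 data rows
print(calculate_pages(df, items_per_page=1))       # -(-2 // 1) == 2, i.e. ceil(2 / 1)
print(calculate_pages(df, items_per_page=3))       # 1 page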