InstruSumEval / app.py
import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler

from src.about import (
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
    INTRODUCTION_TEXT,
    LLM_BENCHMARKS_TEXT,
    TITLE,
)
from src.display.css_html_js import custom_css
from src.envs import API, REPO_ID, TOKEN
from src import populate
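
# A minimal sketch of what `src/envs.py` presumably provides (an assumption,
# not the actual file): an authenticated `HfApi` client plus this Space's
# repo id and access token, e.g. read from a Space secret:
#
#     import os
#     from huggingface_hub import HfApi
#
#     TOKEN = os.environ.get("HF_TOKEN")  # hypothetical secret name
#     REPO_ID = "..."                     # this Space's repo id
#     API = HfApi(token=TOKEN)
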
def restart_space():
    # Restart the Space via the Hub API so the app reloads fresh results.
    API.restart_space(repo_id=REPO_ID, token=TOKEN)

# restart_space()
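
# A minimal sketch of the `src/populate.py` interface this app relies on
# (assumed, not the actual module): `load_leaderboard` returns a pandas
# DataFrame whose columns match the headers below, and `TYPES` lists the
# Gradio column datatypes, e.g.:
#
#     import pandas as pd
#
#     TYPES = ["number", "markdown", "number", "number", "number", "number"]
#
#     def load_leaderboard() -> pd.DataFrame:
#         ...  # load, score, and rank the stored evaluation results
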
demo = gr.Blocks(css=custom_css)
with demo:
    gr.HTML(TITLE)
    # with gr.Column(scale=5):
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
    # with gr.Column(scale=2):
    #     gr.Markdown("""
    #         ![](src/logo.png)
    #     """)
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
            leaderboard_df = populate.load_leaderboard()
            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df,
                # headers=dummy_leaderboard.HEADERS,
                headers=["", "Model", "Accuracy", "Agreement", "Self-Accuracy", "Self-Agreement"],
                datatype=populate.TYPES,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
                height=600,
            )

        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=6,
                elem_id="citation-button",
                show_copy_button=True,
            )
    # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
    #     with gr.Column():
    #         with gr.Row():
    #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
    #         with gr.Row():
    #             gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

# Schedule an automatic Space restart every 5 hours (18000 s) so the
# leaderboard is rebuilt from the latest results.
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=18000)
scheduler.start()

demo.queue(default_concurrency_limit=40).launch()
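
# Assumed local workflow (not part of the Space config): export the Hub
# token (e.g. HF_TOKEN) and run `python app.py`; on Spaces this file is
# the default Gradio entry point.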