import json
import requests
from datasets import load_dataset
import gradio as gr
from huggingface_hub import HfApi, hf_hub_download
from huggingface_hub.repocard import metadata_load
import pandas as pd
from matchmaking import *
block = gr.Blocks()

# Environments shown on the leaderboard. "global" will hold the Gradio
# DataFrame component created for each environment in the UI below.
envs = [
    {
        "name": "Snowball-Fight",
        "global": None,
    },
]

matchmaking = Matchmaking()
def update_elos():
    """Reload the match history, recompute the ELO ratings, and save them."""
    matchmaking.read_history()
    matchmaking.compute_elo()
    matchmaking.save_elo_data()
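
# For reference, a minimal sketch of a standard Elo update (logistic expected
# score, fixed K-factor), assuming the usual formulation of the rating system.
# It only illustrates the kind of rating rule the leaderboard is based on; the
# actual computation lives in the Matchmaking class imported above, and
# elo_update_sketch is not called anywhere in this app.
def elo_update_sketch(rating_a: float, rating_b: float, score_a: float, k: float = 32.0):
    """Return updated (rating_a, rating_b) after one game.

    score_a is 1.0 if player A won, 0.5 for a draw, 0.0 if player A lost.
    """
    expected_a = 1.0 / (1.0 + 10 ** ((rating_b - rating_a) / 400.0))
    new_rating_a = rating_a + k * (score_a - expected_a)
    new_rating_b = rating_b + k * ((1.0 - score_a) - (1.0 - expected_a))
    return new_rating_a, new_rating_b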
def get_env_data(env_name) -> pd.DataFrame:
    """Return the leaderboard table for one environment (placeholder for now)."""
    # data = pd.read_csv(f"env_elos/{env_name}.csv")
    data = pd.DataFrame(columns=["user", "model", "elo", "games_played"])
    return data
with block:
    gr.Markdown(f"""
# 🏆 The Deep Reinforcement Learning Course Leaderboard 🏆
This is the leaderboard of agents trained during the Deep Reinforcement Learning Course, a free course that takes you from beginner to expert.
Just choose the environment you trained your agent on and use Ctrl+F to find your rank 🏆
We use an ELO rating to sort the models.
You **can click on a model's name** to be redirected to its model card, which includes documentation.
🤖 Want to train your own agents? <a href="http://eepurl.com/ic5ZUD" target="_blank">Sign up for the free Hugging Face Deep Reinforcement Learning Course 🤗</a>.
Want to compare two agents? <a href="https://huggingface.co/spaces/ThomasSimonini/Compare-Reinforcement-Learning-Agents" target="_blank">You can do it with this Spaces demo 👀</a>.
🔧 Is an **environment missing**? Please open an issue.
""")
    for i, env in enumerate(envs):
        with gr.TabItem(env["name"]) as tab:
            with gr.Row():
                refresh_data = gr.Button("Refresh")
                val = gr.Variable(value=env["name"])
            with gr.Row():
                env["global"] = gr.components.DataFrame(
                    get_env_data(env["name"]),
                    headers=["Ranking 🏆", "User 🤗", "Model id 🤖", "ELO 🏆", "Games played 🎮"],
                    datatype=["number", "markdown", "markdown", "number", "number"],
                )
            # Wire the refresh button after the DataFrame component exists,
            # so clicking it actually updates the table.
            refresh_data.click(get_env_data, inputs=[val], outputs=env["global"])
block.launch()