import gradio as gr
from gradio_leaderboard import Leaderboard
from pathlib import Path
import pandas as pd
import os
import json

from envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
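
# Gradio Space for the MLSB 2024 challenge leaderboards: one tab per challenge
# (PINDER and PLINDER) plus a submission tab that writes evaluation requests
# to the QUEUE_REPO dataset on the Hugging Face Hub.
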
def submit(model_name, model_id, challenge, submission_id, paper_link, architecture, license):
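    """Validate the form inputs and queue the model for evaluation.

    The entry is written to a local JSON file, uploaded to QUEUE_REPO, and the
    local copy is removed afterwards. Validation and upload errors are
    surfaced to the user by raising gr.Error.
    """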
    if model_name == "" or model_id == "" or challenge == "" or architecture == "" or license == "":
        raise gr.Error("Please fill in all the fields.")
    if submission_id == "" and paper_link == "":
        raise gr.Error("Provide either a link to a paper describing the method or a submission ID for the MLSB workshop.")
    try:
        # Split a Hub-style id ("username/model") into namespace and model name.
        user_name = ""
        model_path = model_id
        if "/" in model_id:
            user_name, model_path = model_id.split("/", 1)
        eval_entry = {
            "model_name": model_name,
            "model_id": model_id,
            "challenge": challenge,
            "submission_id": submission_id,
            "paper_link": paper_link,
            "architecture": architecture,
            "license": license,
        }
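        # Write the request entry to a local JSON file before uploading it.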
        OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
        os.makedirs(OUT_DIR, exist_ok=True)
        out_path = f"{OUT_DIR}/{user_name}_{model_path}.json"
        with open(out_path, "w") as f:
            f.write(json.dumps(eval_entry))
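
        # Upload the request into the queue repo; path_in_repo assumes
        # EVAL_REQUESTS_PATH points inside a local "eval-queue/" directory.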
print("Uploading eval file") | |
API.upload_file( | |
path_or_fileobj=out_path, | |
path_in_repo=out_path.split("eval-queue/")[1], | |
repo_id=QUEUE_REPO, | |
repo_type="dataset", | |
commit_message=f"Add {model_name} to eval queue", | |
) | |
        gr.Info("Successfully submitted", duration=10)
        # Remove the local file
        os.remove(out_path)
    except Exception as e:
        raise gr.Error(f"Error submitting the model: {e}")

abs_path = Path(__file__).parent

# Any pandas-compatible data
pinder_df = pd.read_json(str(abs_path / "leaderboard_pinder.json"))
plinder_df = pd.read_json(str(abs_path / "leaderboard_plinder.json"))
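
# Build the UI: one tab per challenge leaderboard plus a submission tab.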
with gr.Blocks() as demo:
    gr.Markdown("""
    # MLSB 2024 Challenges
    Please find more details about the challenge on [mlsb.io/#challenge](https://www.mlsb.io/#challenge).
    This competition is run together with VantAI, NVIDIA, Hugging Face & the University of Basel.
    """)
    with gr.Tab("PINDER Leaderboard"):
        gr.Markdown("""## PINDER Leaderboard
        Evaluating protein-protein interaction prediction
        """)
        Leaderboard(
            value=pinder_df,
            select_columns=["Arch", "Model", "L_rms", "I_rms",
                            "F_nat", "DOCKQ", "CAPRI"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Arch"],
        )
    with gr.Tab("PLINDER Leaderboard"):
        gr.Markdown("""## PLINDER Leaderboard
        Evaluating protein-ligand prediction
        """)
        Leaderboard(
            value=plinder_df,
            select_columns=["Arch", "Model", "Mean lDDT-PLI", "Median RMSD",
                            "Success Rate (% lDDT-PLI >= 0.7)"],
            search_columns=["model_name_for_query"],
            hide_columns=["model_name_for_query"],
            filter_columns=["Arch"],
        )
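    # Submission form: collects model metadata and queues it for evaluation.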
    with gr.Tab("Submit"):
        gr.Markdown("""## Submit your model
        Submit your model to the leaderboard.
        """)
        model_name = gr.Textbox(label="Model name")
        model_id = gr.Textbox(label="Model ID (username/space), e.g. mlsb/alphafold3")
        challenge = gr.Radio(choices=["PINDER", "PLINDER"], label="Challenge")
        gr.Markdown("Provide either a submission ID (if you submitted to the MLSB workshop) or a link to the preprint/paper describing the method.")
        with gr.Row():
            submission_id = gr.Textbox(label="Submission ID on CMT")
            paper_link = gr.Textbox(label="Preprint or paper link")
        architecture = gr.Dropdown(choices=["GNN", "CNN", "Physics-based", "Other"], label="Model architecture")
        license = gr.Dropdown(choices=["mit", "apache-2.0", "gplv2", "gplv3", "lgpl", "mozilla", "bsd", "other"], label="License")
        submit_btn = gr.Button("Submit")
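        # Wire the button to the handler; status is reported via gr.Info / gr.Error.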
        submit_btn.click(submit, inputs=[model_name, model_id, challenge, submission_id, paper_link, architecture, license], outputs=[])

if __name__ == "__main__":
    demo.launch()