import json
import os
import shutil
import uuid
from datetime import datetime
from pathlib import Path

import jsonlines
import streamlit as st
from dotenv import load_dotenv
from huggingface_hub import Repository, cached_download, hf_hub_url

from utils import http_get, http_post, validate_json
if Path(".env").is_file():
load_dotenv(".env")
HF_TOKEN = os.getenv("HF_TOKEN")
AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
LOCAL_REPO = "submission_repo"
LOGS_REPO = "submission-logs"

# TODO
# 1. Add check that fields are nested under `tasks` field correctly
# 2. Add check that names of tasks and datasets are valid

MARKDOWN = """---
benchmark: gem
type: prediction
submission_name: {submission_name}
tags:
- evaluation
- benchmark
---
# GEM Submission

Submission name: {submission_name}
"""


def generate_dataset_card(submission_name):
    """
    Generate the dataset card for the submission
    """
    markdown = MARKDOWN.format(
        submission_name=submission_name,
    )
    with open(os.path.join(LOCAL_REPO, "README.md"), "w") as f:
        f.write(markdown)


def load_json(path):
    with open(path, "r") as f:
        return json.load(f)


def get_submission_names():
    """Download all submission names.

    The GEM frontend requires submission names to be unique, so we download
    all existing names and check each new submission against them.
    """
    scores_url = hf_hub_url("GEM-submissions/submission-scores", "scores.json", repo_type="dataset")
    scores_filepath = cached_download(scores_url, force_download=True)
    scores_data = load_json(scores_filepath)
    return [score["submission_name"] for score in scores_data]


#######
# APP #
#######
st.title("GEM Submissions")
st.markdown(
    """
Welcome to the [GEM benchmark](https://gem-benchmark.com/)! GEM is a benchmark
environment for Natural Language Generation with a focus on its Evaluation, both
through human annotations and automated Metrics.

GEM aims to:

- measure NLG progress across many NLG tasks and languages.
- audit data and models and present results via data cards and model robustness
  reports.
- develop standards for the evaluation of generated text using both automated and
  human metrics.

Use this page to submit your system's predictions to the benchmark.
"""
)
with st.form(key="form"):
# Flush local repo
shutil.rmtree(LOCAL_REPO, ignore_errors=True)
submission_errors = 0
uploaded_file = st.file_uploader("Upload submission file", type=["json"])
if uploaded_file:
data = str(uploaded_file.read(), "utf-8")
json_data = json.loads(data)
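        # Check the submission name against all existing submissions; the GEM
        # frontend requires submission names to be unique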
        submission_names = get_submission_names()
        submission_name = json_data["submission_name"]
        if submission_name in submission_names:
            st.error(f"🙈 Submission name `{submission_name}` is already taken. Please rename your submission.")
            submission_errors += 1
        else:
            is_valid, message = validate_json(json_data)
            if is_valid:
                st.success(message)
            else:
                st.error(message)
                submission_errors += 1
with st.expander("Submission format"):
st.markdown(
"""
Please follow this JSON format for your `submission.json` file:
```json
{
"submission_name": "An identifying name of your system",
"param_count": 123, # The number of parameters your system has.
"description": "An optional brief description of the system that will be shown on the results page",
"tasks":
{
"dataset_identifier": {
"values": ["output-0", "output-1", "..."], # A list of system outputs.
"keys": ["gem_id-0", "gem_id-1", ...] # A list of GEM IDs.
}
}
}
```

Here, `dataset_identifier` is the identifier of the dataset followed by an
identifier of the split the outputs were created from, such as `_validation`
or `_test`. For example, the `mlsum_de` test set has the identifier
`mlsum_de_test`. The `keys` field is needed to avoid accidental shuffling
that would impact your metrics: simply provide the IDs from the `gem_id`
column of each evaluation dataset, in the same order as your `values`.
Please see the sample submission below:
"""
        )
with open("sample-submission.json", "r") as f:
example_submission = json.load(f)
st.json(example_submission)
user_name = st.text_input("Enter your π€ Hub username", help="This field is required to track your submission and cannot be empty")
submit_button = st.form_submit_button("Make Submission")

if submit_button and submission_errors == 0 and uploaded_file is not None:
    with st.spinner("⏳ Preparing submission for evaluation ..."):
        submission_name = json_data["submission_name"]
        submission_name_formatted = submission_name.lower().replace(" ", "-").replace("/", "-")
        submission_time = str(int(datetime.now().timestamp()))
        # Create submission dataset under benchmarks ORG
        submission_repo_id = f"GEM-submissions/{user_name}__{submission_name_formatted}__{submission_time}"
        dataset_repo_url = f"https://huggingface.co/datasets/{submission_repo_id}"
        repo = Repository(
            local_dir=LOCAL_REPO,
            clone_from=dataset_repo_url,
            repo_type="dataset",
            private=False,
            use_auth_token=HF_TOKEN,
        )
        generate_dataset_card(submission_name)
        with open(f"{LOCAL_REPO}/submission.json", "w", encoding="utf-8") as f:
            json.dump(json_data, f)
        # TODO: add informative commit msg
        commit_url = repo.push_to_hub()
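        # `push_to_hub` returns None when there was nothing new to push, so fall
        # back to the URL of the current HEAD commit in that case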
        if commit_url is not None:
            commit_sha = commit_url.split("/")[-1]
        else:
            commit_sha = repo.git_head_commit_url().split("/")[-1]
        submission_id = submission_name + "__" + str(uuid.uuid4())[:6] + "__" + submission_time
        # Define AutoTrain payload
        project_config = {}
        # Need a dummy dataset to use the dataset loader in AutoTrain
        project_config["dataset_name"] = "lewtun/imdb-dummy"
        project_config["dataset_config"] = "lewtun--imdb-dummy"
        project_config["dataset_split"] = "train"
        project_config["col_mapping"] = {"text": "text", "label": "target"}
        # Specify benchmark parameters
        project_config["model"] = "gem"
        project_config["dataset"] = "GEM/references"
        project_config["submission_dataset"] = submission_repo_id
project_id = str(uuid.uuid4()).split("-")[0]
project_payload = {
"username": AUTOTRAIN_USERNAME,
"proj_name": f"benchmark-gem-{project_id}",
"task": 1,
"config": {
"language": "en",
"max_models": 5,
"instance": {
"provider": "aws",
"instance_type": "ml.g4dn.4xlarge",
"max_runtime_seconds": 172800,
"num_instances": 1,
"disk_size_gb": 150,
},
"benchmark": {
"dataset": project_config["dataset"],
"model": project_config["model"],
"submission_dataset": project_config["submission_dataset"],
},
},
}
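        # NB: the integer values for "task" (above) and "split" (below) appear to
        # be enum codes defined internally by the AutoTrain backend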
        project_json_resp = http_post(
            path="/projects/create", payload=project_payload, token=HF_TOKEN, domain=AUTOTRAIN_BACKEND_API
        ).json()
        print(f"Project creation: {project_json_resp}")
        # Upload data
        payload = {
            "split": 4,
            "col_mapping": project_config["col_mapping"],
            "load_config": {"max_size_bytes": 0, "shuffle": False},
        }
        data_json_resp = http_post(
            path=f"/projects/{project_json_resp['id']}/data/{project_config['dataset_name']}",
            payload=payload,
            token=HF_TOKEN,
            domain=AUTOTRAIN_BACKEND_API,
            params={
                "type": "dataset",
                "config_name": project_config["dataset_config"],
                "split_name": project_config["dataset_split"],
            },
        ).json()
        print(f"Dataset creation: {data_json_resp}")
        # Run training
        train_json_resp = http_get(
            path=f"/projects/{project_json_resp['id']}/data/start_process",
            token=HF_TOKEN,
            domain=AUTOTRAIN_BACKEND_API,
        ).json()
        print(f"Training job response: {train_json_resp}")
logs_repo_url = f"https://huggingface.co/datasets/GEM-submissions/{LOGS_REPO}"
logs_repo = Repository(
local_dir=LOGS_REPO,
clone_from=logs_repo_url,
repo_type="dataset",
private=True,
use_auth_token=HF_TOKEN,
)
evaluation_log = {}
evaluation_log["payload"] = project_payload
evaluation_log["project_creation_response"] = project_json_resp
evaluation_log["dataset_creation_response"] = data_json_resp
evaluation_log["autotrain_job_response"] = train_json_resp
with jsonlines.open(f"{LOGS_REPO}/logs.jsonl") as r:
lines = []
for obj in r:
lines.append(obj)
lines.append(evaluation_log)
with jsonlines.open(f"{LOGS_REPO}/logs.jsonl", mode="w") as writer:
for job in lines:
writer.write(job)
logs_repo.push_to_hub(commit_message=f"Submission with job ID {project_json_resp['id']}")
if train_json_resp["success"] == 1:
st.success(
f"β
Submission {submission_name} was successfully submitted for evaluation!"
)
st.markdown(
f"""
Evaluation can take up to 1 hour to complete, so grab a β or π΅ while you wait:
* π Click [here](https://huggingface.co/spaces/GEM/results) to view the results from your submission
* πΎ Click [here]({dataset_repo_url}) to view your submission file on the Hugging Face Hub
Please [contact the organisers](mailto:gehrmann@google.com) if you would like your submission and/or evaluation scores deleted.
"""
)
else:
st.error(
"π Oh noes, there was an error submitting your submission! Please [contact the organisers](mailto:gehrmann@google.com)"
)
        # Flush local repos
        shutil.rmtree(LOCAL_REPO, ignore_errors=True)
        shutil.rmtree(LOGS_REPO, ignore_errors=True)
with st.expander("Download all submissions and scores"):
st.markdown("Click the button below if you'd like to download all the submissions and evaluations from GEM:")
outputs_url = hf_hub_url(
"GEM-submissions/v2-outputs-and-scores", "gem-v2-outputs-and-scores.zip", repo_type="dataset"
)
outputs_filepath = cached_download(outputs_url)
with open(outputs_filepath, "rb") as f:
btn = st.download_button(label="Download submissions and scores", data=f, file_name="outputs-and-scores.zip")